| id | content | max_stars_repo_path |
|---|---|---|
crossvul-cpp_data_good_3486_2 | /*
* ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
*
* ARMv7 support: Jean Pihet <jpihet@mvista.com>
* 2010 (c) MontaVista Software, LLC.
*
* Copied from ARMv6 code, with the low level code inspired
* by the ARMv7 Oprofile code.
*
* Cortex-A8 has up to 4 configurable performance counters and
* a single cycle counter.
* Cortex-A9 has up to 31 configurable performance counters and
* a single cycle counter.
*
* All counters can be enabled/disabled and IRQ masked separately. The cycle
* counter can be reset separately from the 4 event counters, which are
* reset together.
*/
#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
* - all branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
ARMV7_PERFCTR_L2_ACCESS = 0x43,
ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
ARMV7_PERFCTR_L2_NEON = 0x4E,
ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
ARMV7_PERFCTR_L1_INST = 0x50,
ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
ARMV7_PERFCTR_OP_EXECUTED = 0x55,
ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
ARMV7_PERFCTR_CYCLES_INST = 0x57,
ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
ARMV7_PERFCTR_DATA_EVICTION = 0x65,
ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
ARMV7_PERFCTR_ISB_INST = 0x90,
ARMV7_PERFCTR_DSB_INST = 0x91,
ARMV7_PERFCTR_DMB_INST = 0x92,
ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};
/*
* Cortex-A8 HW events mapping
*
* The hardware events that we support. We do support cache operations but
* we have Harvard caches and no way to combine instruction and data
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
* If users want the DTLB refill misses, a raw
* counter must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* Only ITLB misses and DTLB refills are supported.
* If users want the DTLB refill misses, a raw
* counter must be used.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
/*
* Perf Events counters
*/
enum armv7_counters {
ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
ARMV7_COUNTER0 = 2, /* First event counter */
};
/*
* The cycle counter is ARMV7_CYCLE_COUNTER.
* The first event counter is ARMV7_COUNTER0.
* The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
*/
#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
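/*
 * A worked example (a sketch, not from the original source): on a
 * Cortex-A8 whose PMNC reports 4 event counters, armpmu->num_events is 5
 * (the 4 event counters plus the cycle counter), so perf index 1 selects
 * the dedicated cycle counter and perf indices from 2 upwards select the
 * hardware event counters CNT0, CNT1, ...
 */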
/*
* ARMv7 low level PMNC access
*/
/*
* Per-CPU PMNC: config reg
*/
#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK 0x1f
#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
/*
* Available counters
*/
#define ARMV7_CNT0 0 /* First event counter */
#define ARMV7_CCNT 31 /* Cycle counter */
/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
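/*
 * Example (derived from the definitions above): with ARMV7_COUNTER0 == 2
 * and ARMV7_CNT0 == 0 the offset is 2, so perf counter index idx maps to
 * hardware counter (idx - 2), and the per-counter bitmask macros below
 * resolve to (1 << (idx - 2)); e.g. idx == ARMV7_COUNTER0 yields bit 0,
 * the bit for event counter CNT0.
 */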
/*
* CNTENS: counters enable reg
*/
#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
/*
* CNTENC: counters disable reg
*/
#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
/*
* INTENS: counters overflow interrupt enable reg
*/
#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C (1 << ARMV7_CCNT)
/*
* INTENC: counters overflow interrupt disable reg
*/
#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C (1 << ARMV7_CCNT)
/*
* EVTSEL: Event selection reg
*/
#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
/*
* SELECT: Counter selection reg
*/
#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
/*
* FLAG: counters overflow flag status reg
*/
#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C (1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
static inline unsigned long armv7_pmnc_read(void)
{
u32 val;
/* PMNC is the ARMv7 performance monitor control register (PMCR) */
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
return val;
}
static inline void armv7_pmnc_write(unsigned long val)
{
val &= ARMV7_PMNC_MASK;
isb();
asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
return pmnc & ARMV7_OVERFLOWED_MASK;
}
static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
enum armv7_counters counter)
{
int ret = 0;
if (counter == ARMV7_CYCLE_COUNTER)
ret = pmnc & ARMV7_FLAG_C;
else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
ret = pmnc & ARMV7_FLAG_P(counter);
else
pr_err("CPU%u checking wrong counter %d overflow status\n",
smp_processor_id(), counter);
return ret;
}
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
u32 val;
if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
pr_err("CPU%u selecting wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
isb();
return idx;
}
static inline u32 armv7pmu_read_counter(int idx)
{
unsigned long value = 0;
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mrc p15, 0, %0, c9, c13, 2"
: "=r" (value));
} else
pr_err("CPU%u reading wrong counter %d\n",
smp_processor_id(), idx);
return value;
}
static inline void armv7pmu_write_counter(int idx, u32 value)
{
if (idx == ARMV7_CYCLE_COUNTER)
asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
if (armv7_pmnc_select_counter(idx) == idx)
asm volatile("mcr p15, 0, %0, c9, c13, 2"
: : "r" (value));
} else
pr_err("CPU%u writing wrong counter %d\n",
smp_processor_id(), idx);
}
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
if (armv7_pmnc_select_counter(idx) == idx) {
val &= ARMV7_EVTSEL_MASK;
asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
}
}
static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENS_C;
else
val = ARMV7_CNTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_CNTENC_C;
else
val = ARMV7_CNTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u enabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENS_C;
else
val = ARMV7_INTENS_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
u32 val;
if ((idx != ARMV7_CYCLE_COUNTER) &&
((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
pr_err("CPU%u disabling wrong PMNC counter"
" interrupt enable %d\n", smp_processor_id(), idx);
return -1;
}
if (idx == ARMV7_CYCLE_COUNTER)
val = ARMV7_INTENC_C;
else
val = ARMV7_INTENC_P(idx);
asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
return idx;
}
static inline u32 armv7_pmnc_getreset_flags(void)
{
u32 val;
/* Read */
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
/* Write to clear flags */
val &= ARMV7_FLAG_MASK;
asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
return val;
}
#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
u32 val;
unsigned int cnt;
printk(KERN_INFO "PMNC registers dump:\n");
asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
printk(KERN_INFO "PMNC =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
printk(KERN_INFO "CNTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
printk(KERN_INFO "INTENS=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
printk(KERN_INFO "FLAGS =0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
printk(KERN_INFO "SELECT=0x%08x\n", val);
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
printk(KERN_INFO "CCNT =0x%08x\n", val);
for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
armv7_pmnc_select_counter(cnt);
asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
printk(KERN_INFO "CNT[%d] count =0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
}
}
#endif
static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Enable counter and interrupt, and set the counter to count
* the event that we're interested in.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Set event (if destined for PMNx counters)
* We don't need to set the event if it's a cycle count
*/
if (idx != ARMV7_CYCLE_COUNTER)
armv7_pmnc_write_evtsel(idx, hwc->config_base);
/*
* Enable interrupt for this counter
*/
armv7_pmnc_enable_intens(idx);
/*
* Enable counter
*/
armv7_pmnc_enable_counter(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags;
/*
* Disable counter and interrupt
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
/*
* Disable counter
*/
armv7_pmnc_disable_counter(idx);
/*
* Disable interrupt for this counter
*/
armv7_pmnc_disable_intens(idx);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* Get and reset the IRQ flags
*/
pmnc = armv7_pmnc_getreset_flags();
/*
* Did an overflow occur?
*/
if (!armv7_pmnc_has_overflowed(pmnc))
return IRQ_NONE;
/*
* Handle the counter(s) overflow(s)
*/
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void armv7pmu_start(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Enable all counters */
armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void armv7pmu_stop(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&pmu_lock, flags);
/* Disable all counters */
armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx;
/* Always place a cycle counter event into the cycle counter. */
if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV7_CYCLE_COUNTER;
} else {
/*
* For anything other than a cycle counter, try and use
* the events counters
*/
for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
if (!test_and_set_bit(idx, cpuc->used_mask))
return idx;
}
/* The counters are all in use. */
return -EAGAIN;
}
}
static void armv7pmu_reset(void *info)
{
u32 idx, nb_cnt = armpmu->num_events;
/* The counter and interrupt enable registers are unknown at reset. */
for (idx = 1; idx < nb_cnt; ++idx)
armv7pmu_disable_event(NULL, idx);
/* Initialize & Reset PMNC: C and P bits */
armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
}
static struct arm_pmu armv7pmu = {
.handle_irq = armv7pmu_handle_irq,
.enable = armv7pmu_enable_event,
.disable = armv7pmu_disable_event,
.read_counter = armv7pmu_read_counter,
.write_counter = armv7pmu_write_counter,
.get_event_idx = armv7pmu_get_event_idx,
.start = armv7pmu_start,
.stop = armv7pmu_stop,
.reset = armv7pmu_reset,
.raw_event_mask = 0xFF,
.max_period = (1LLU << 32) - 1,
};
static u32 __init armv7_read_num_pmnc_events(void)
{
u32 nb_cnt;
/* Read the nb of CNTx counters supported from PMNC */
nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
/* Add the CPU cycles counter and return */
return nb_cnt + 1;
}
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA8;
armv7pmu.name = "ARMv7 Cortex-A8";
armv7pmu.cache_map = &armv7_a8_perf_cache_map;
armv7pmu.event_map = &armv7_a8_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
armv7pmu.id = ARM_PERF_PMU_ID_CA9;
armv7pmu.name = "ARMv7 Cortex-A9";
armv7pmu.cache_map = &armv7_a9_perf_cache_map;
armv7pmu.event_map = &armv7_a9_perf_map;
armv7pmu.num_events = armv7_read_num_pmnc_events();
return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V7 */
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_2 |
crossvul-cpp_data_bad_944_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO GGGGG RRRR IIIII FFFFF Y Y %
% MM MM O O G R R I F Y Y %
% M M M O O G GGG RRRR I FFF Y %
% M M O O G G R R I F Y %
% M M OOO GGGG R R IIIII F Y %
% %
% %
% MagickWand Module Methods %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use the mogrify program to resize an image, blur, crop, despeckle, dither,
% draw on, flip, join, re-sample, and much more. This tool is similar to
% convert except that the original image file is overwritten (unless you
% change the file suffix with the -format option) with any changes you
% request.
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/mogrify-private.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_HAVE_UTIME_H)
#include <utime.h>
#endif
/*
Constant declaration.
*/
static const char
MogrifyAlphaColor[] = "#bdbdbd", /* gray */
MogrifyBackgroundColor[] = "#ffffff", /* white */
MogrifyBorderColor[] = "#dfdfdf"; /* gray */
/*
Define declarations.
*/
#define UndefinedCompressionQuality 0UL
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o m m a n d G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickCommandGenesis() applies image processing options to an image as
% prescribed by command line options.
%
% It will look for special options like "-debug", "-bench", and
% "-distribute-cache" that need to be applied even before the main
% processing begins, and may completely overrule normal command processing.
% Such 'Genesis' options can only be given on the CLI (not in a script)
% and are typically ignored (as they have already been handled) if seen later.
%
% The format of the MagickCommandGenesis method is:
%
% MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
% MagickCommand command,int argc,char **argv,char **metadata,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o command: Choose from ConvertImageCommand, IdentifyImageCommand,
% MogrifyImageCommand, CompositeImageCommand, CompareImagesCommand,
% ConjureImageCommand, StreamImageCommand, ImportImageCommand,
% DisplayImageCommand, or AnimateImageCommand.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
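/*
  A minimal usage sketch (an illustration, not part of this module; it
  assumes the public MogrifyImageCommand entry point and the standard
  MagickCore genesis/terminus calls):

    const char *args[] = { "mogrify", "-resize", "50%", "image.png" };
    ImageInfo *image_info;
    ExceptionInfo *exception;

    MagickCoreGenesis(args[0], MagickFalse);
    exception = AcquireExceptionInfo();
    image_info = AcquireImageInfo();
    (void) MagickCommandGenesis(image_info, MogrifyImageCommand, 4,
      (char **) args, (char **) NULL, exception);
    image_info = DestroyImageInfo(image_info);
    exception = DestroyExceptionInfo(exception);
    MagickCoreTerminus();
*/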
WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
MagickCommand command,int argc,char **argv,char **metadata,
ExceptionInfo *exception)
{
char
client_name[MaxTextExtent],
*option;
double
duration,
serial;
MagickBooleanType
concurrent,
regard_warnings,
status;
register ssize_t
i;
size_t
iterations,
number_threads;
ssize_t
n;
(void) setlocale(LC_ALL,"");
(void) setlocale(LC_NUMERIC,"C");
GetPathComponent(argv[0],TailPath,client_name);
(void) SetClientName(client_name);
concurrent=MagickFalse;
duration=(-1.0);
iterations=1;
status=MagickTrue;
regard_warnings=MagickFalse;
for (i=1; i < (ssize_t) (argc-1); i++)
{
option=argv[i];
if ((strlen(option) == 1) || ((*option != '-') && (*option != '+')))
continue;
if (LocaleCompare("-bench",option) == 0)
iterations=StringToUnsignedLong(argv[++i]);
if (LocaleCompare("-concurrent",option) == 0)
concurrent=MagickTrue;
if (LocaleCompare("-debug",option) == 0)
(void) SetLogEventMask(argv[++i]);
if (LocaleCompare("-distribute-cache",option) == 0)
{
DistributePixelCacheServer(StringToInteger(argv[++i]),exception);
exit(0);
}
if (LocaleCompare("-duration",option) == 0)
duration=StringToDouble(argv[++i],(char **) NULL);
if (LocaleCompare("-regard-warnings",option) == 0)
regard_warnings=MagickTrue;
}
if (iterations == 1)
{
char
*text;
text=(char *) NULL;
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
return(status);
}
number_threads=GetOpenMPMaximumThreads();
serial=0.0;
for (n=1; n <= (ssize_t) number_threads; n++)
{
double
e,
parallel,
user_time;
TimerInfo
*timer;
(void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n);
timer=AcquireTimerInfo();
if (concurrent == MagickFalse)
{
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
else
{
SetOpenMPNested(1);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp parallel for shared(status)
#endif
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_MagickCommandGenesis)
#endif
{
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
}
user_time=GetUserTime(timer);
parallel=GetElapsedTime(timer);
e=1.0;
if (n == 1)
serial=parallel;
else
e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/
(double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n);
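/*
  The expression above has the form of the Karp-Flatt metric (an
  interpretation, not stated in the source): with serial fraction
  f = serial/(serial+parallel), Amdahl's law predicts a speedup of
  S(n) = 1/(f + (1-f)/n) on n threads, and
  e = (1/S(n) - 1/n)/(1 - 1/n) recovers the serial fraction f.
*/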
(void) FormatLocaleFile(stderr,
" Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n",
(double) n,(double) iterations,(double) iterations/parallel,e,user_time,
(unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel,
60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5));
timer=DestroyTimerInfo(timer);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImage() applies simple single image processing options to a single
% image that may be part of a large list, but also handles any 'region'
% image handling.
%
% The image in the list may be modified in three different ways...
%
% * directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
% * replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
% * replaced by a list of images (only the -separate option!)
%
% In each case the result is returned into the list, and a pointer to the
% modified image (last image added if replaced by a list of images) is
% returned.
%
% ASIDE: The -crop is present but restricted to non-tile single image crops
%
% This means that if all the images are being processed (such as by
% MogrifyImages()), the next image to be processed will be as per the
% pointer (*image)->next. Also the image list may grow as a result of some
% specific operations, but as images are never merged or deleted, it will
% never shrink in length. Typically the list will remain the same length.
%
% WARNING: As the image pointed to may be replaced, the first image in the
% list may also change. GetFirstImageInList() should be used by the caller
% if they wish to obtain the Image pointer to the first image in the list.
%
%
% The format of the MogrifyImage method is:
%
% MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
% const char **argv,Image **image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
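/*
  A minimal calling sketch (an illustration only; the option strings and
  the surrounding setup are assumptions, not taken from this file):

    const char *args[] = { "-resize", "640x480", "-strip" };
    Image *image = ReadImage(image_info, exception);

    (void) MogrifyImage(image_info, 3, args, &image, exception);
    image = GetFirstImageInList(image);   (cf. the WARNING above)
*/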
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MagickPathExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
/*
Read an image into an image cache (for repeated usage) if not already in
cache. Then return the image that is in the cache.
*/
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->filename,path,MagickPathExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
static inline MagickBooleanType IsPathWritable(const char *path)
{
if (IsPathAccessible(path) == MagickFalse)
return(MagickFalse);
if (access_utf8(path,W_OK) != 0)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MagickPathExtent],
tag[MagickPathExtent];
const char
*locale_message;
register char
*p;
magick_unreferenced(client_data);
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MagickPathExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
static Image *SparseColorOption(const Image *image,
const SparseColorMethod method,const char *arguments,
const MagickBooleanType color_from_image,ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
PixelInfo
color;
MagickBooleanType
error;
register size_t
x;
size_t
number_arguments,
number_colors;
/*
SparseColorOption() parses the complex -sparse-color argument into an
array of floating point values, then calls SparseColorImage(). The
argument is a complex mix of floating-point pixel coordinates and color
specifications (or direct floating point numbers). The number of floats
needed to represent a color varies depending on the current channel
setting.
*/
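/*
For example (a sketch, not from the original source; assuming three color
channels are active so number_colors == 3), the argument string

"30,10 red 10,80 blue 90,90 lime"

parses into 3 control points of 2 coordinates plus 3 color values each,
i.e. number_arguments == 15 floats handed to SparseColorImage().
*/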
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Limit channels according to image - and add up number of color channel.
*/
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
if ( color_from_image ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color arg given, when colors are coming from image");
return( (Image *) NULL);
}
x += number_colors; /* color argument */
}
else {
x++; /* floating point argument */
}
}
error=MagickTrue;
if ( color_from_image ) {
/* just the control points are being given */
error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse;
number_arguments=(x/2)*(2+number_colors);
}
else {
/* control points and color values */
error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse;
number_arguments=x;
}
if ( error ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
" MemoryAllocationFailed\n""%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of X-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color values for this control point */
#if 0
if ( color_from_image ) {
/* get color from image */
/* HOW??? */
}
else
#endif
{
/* color name or function given in string argument */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryColorCompliance(token,AllCompliance,&color,exception);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.blue;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
sparse_arguments[x++] = QuantumScale*color.black;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
sparse_arguments[x++] = QuantumScale*color.alpha;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
}
if ( number_arguments != x && !error ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
" InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error");
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( (Image *) NULL);
}
if ( error )
return( (Image *) NULL);
/* Call the Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments,
exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
const char **argv,Image **image,ExceptionInfo *exception)
{
CompositeOperator
compose;
const char
*format,
*option;
double
attenuate;
DrawInfo
*draw_info;
GeometryInfo
geometry_info;
ImageInfo
*mogrify_info;
MagickStatusType
status;
PixelInfo
fill;
MagickStatusType
flags;
PixelInterpolateMethod
interpolate_method;
QuantizeInfo
*quantize_info;
RectangleInfo
geometry,
region_geometry;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (argc < 0)
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL);
quantize_info=AcquireQuantizeInfo(mogrify_info);
SetGeometryInfo(&geometry_info);
GetPixelInfo(*image,&fill);
fill=(*image)->background_color;
attenuate=1.0;
compose=(*image)->compose;
interpolate_method=UndefinedInterpolatePixel;
format=GetImageOption(mogrify_info,"format");
SetGeometry(*image,®ion_geometry);
/*
Transmogrify the image.
*/
for (i=0; i < (ssize_t) argc; i++)
{
Image
*mogrify_image;
ssize_t
count;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option),
0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
mogrify_image=(Image *) NULL;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
/*
Adaptive blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveBlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/*
Adaptive resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=AdaptiveResizeImage(*image,geometry.width,
geometry.height,exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
/*
Adaptive sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveSharpenImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
/*
Affine matrix.
*/
if (*option == '+')
{
GetAffineMatrix(&draw_info->affine);
break;
}
(void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
AlphaChannelOption
alpha_type;
(void) SyncImageSettings(mogrify_info,*image,exception);
alpha_type=(AlphaChannelOption) ParseCommandOption(
MagickAlphaChannelOptions,MagickFalse,argv[i+1]);
(void) SetImageAlphaChannel(*image,alpha_type,exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
*text,
geometry_str[MagickPathExtent];
/*
Annotate image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
text=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (text == (char *) NULL)
break;
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry_str,MagickPathExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&draw_info->geometry,geometry_str);
draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(*image,draw_info,exception);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
draw_info->stroke_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
draw_info->text_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
attenuate=1.0;
break;
}
attenuate=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
{
/*
Auto Adjust Gamma of image based on its mean
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) AutoGammaImage(*image,exception);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
/*
Perfectly Normalize (max/min stretch) the image
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) AutoLevelImage(*image,exception);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=AutoOrientImage(*image,(*image)->orientation,
exception);
break;
}
if (LocaleCompare("auto-threshold",option+1) == 0)
{
AutoThresholdMethod
method;
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(AutoThresholdMethod) ParseCommandOption(
MagickAutoThresholdOptions,MagickFalse,argv[i+1]);
(void) AutoThresholdImage(*image,method,exception);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
/*
Black threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) BlackThresholdImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
/*
Blue shift image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
geometry_info.rho=1.5;
if (*option == '-')
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
mogrify_image=BlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
/*
Surround image with a border of solid color.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=BorderImage(*image,&geometry,compose,exception);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&draw_info->border_color,exception);
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->border_color,exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->undercolor,exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
/*
Brightness / contrast image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImage(*image,brightness,contrast,
exception);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
/*
Detect edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
mogrify_image=CannyEdgeImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection;
/*
Color correct with a color decision list.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
color_correction_collection=FileToString(argv[i+1],~0UL,exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(*image,color_correction_collection,
exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ChannelType
channel;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetPixelChannelMask(*image,DefaultChannels);
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
(void) SetPixelChannelMask(*image,channel);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
/*
Charcoal image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
mogrify_image=CharcoalImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
/*
Chop the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ChopImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
/*
Contrast limited adaptive histogram equalization.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
(void) CLAHEImage(*image,geometry.width,geometry.height,
(size_t) geometry.x,geometry_info.psi,exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
(void) ClipImage(*image,exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
Image
*clip_mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
clip_mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (clip_mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
/*
Colorize the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=ColorizeImage(*image,argv[i+1],&fill,exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image,exception);
kernel=AcquireKernelInfo(argv[i+1],exception);
if (kernel == (KernelInfo *) NULL)
break;
/* FUTURE: check on size of the matrix */
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/*
Reduce the number of colors in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
quantize_info->number_colors=StringToUnsignedLong(argv[i+1]);
if (quantize_info->number_colors == 0)
break;
if (((*image)->storage_class == DirectClass) ||
(*image)->colors > quantize_info->number_colors)
(void) QuantizeImage(quantize_info,*image,exception);
else
(void) CompressImageColormap(*image,exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ColorspaceType
colorspace;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) TransformImageColorspace(*image,sRGBColorspace,
exception);
break;
}
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) TransformImageColorspace(*image,colorspace,exception);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=ConnectedComponentsImage(*image,(size_t)
StringToInteger(argv[i+1]),(CCObjectInfo **) NULL,exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ContrastImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
/*
Contrast stretch image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
white_point=(double) (*image)->columns*(*image)->rows-
white_point;
(void) ContrastStretchImage(*image,black_point,white_point,
exception);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
size_t
extent;
(void) SyncImageSettings(mogrify_info,*image,exception);
kernel_info=AcquireKernelInfo(argv[i+1],exception);
if (kernel_info == (KernelInfo *) NULL)
break;
extent=kernel_info->width*kernel_info->height;
gamma=0.0;
for (j=0; j < (ssize_t) extent; j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) extent; j++)
kernel_info->values[j]*=gamma;
mogrify_image=MorphologyImage(*image,CorrelateMorphology,1,
kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
/*
Crop an image to a smaller size.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=CropImageToTiles(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
/*
Cycle an image colormap.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]),
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Decipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyDecipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
(void) CloneString(&draw_info->density,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH,exception);
break;
}
(void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]),
exception);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
/*
Straighten the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
threshold=40.0*QuantumRange/100.0;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=DeskewImage(*image,threshold,exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
/*
Reduce the speckles within an image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=DespeckleImage(*image,exception);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&draw_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
char
*args,
token[MagickPathExtent];
const char
*p;
DistortMethod
method;
double
*arguments;
register ssize_t
x;
size_t
number_arguments;
/*
Distort image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(DistortMethod) ParseCommandOption(MagickDistortOptions,
MagickFalse,argv[i+1]);
if (method == ResizeDistortion)
{
double
resize_args[2];
/*
Special Case - Argument is actually a resize geometry!
Convert that to an appropriate distortion argument array.
*/
(void) ParseRegionGeometry(*image,argv[i+2],&geometry,
exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
mogrify_image=DistortImage(*image,method,(size_t) 2,
resize_args,MagickTrue,exception);
break;
}
args=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (args == (char *) NULL)
break;
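/*
Two-pass parse: first count the numeric tokens (comma separators are
skipped), then allocate the argument array and convert each token.
*/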
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
mogrify_image=DistortImage(*image,method,number_arguments,arguments,
(*option == '+') ? MagickTrue : MagickFalse,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither_method=NoDitherMethod;
break;
}
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
/*
Draw image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) CloneString(&draw_info->primitive,argv[i+1]);
(void) DrawImage(*image,draw_info,exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
/*
Enhance edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=EdgeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
/*
Emboss image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EmbossImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Encipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&draw_info->encoding,argv[i+1]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
/*
Enhance image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=EnhanceImage(*image,exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
/*
Equalize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) EqualizeImage(*image,exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*image,exception);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+
1.0);
(void) EvaluateImage(*image,op,constant,exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
/*
Set the image extent.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (geometry.width == 0)
geometry.width=(*image)->columns;
if (geometry.height == 0)
geometry.height=(*image)->rows;
mogrify_image=ExtentImage(*image,&geometry,exception);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
break;
}
(void) CloneString(&draw_info->family,argv[i+1]);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:features");
break;
}
(void) SetImageArtifact(*image,"identify:features",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
ExceptionInfo
*sans;
PixelInfo
color;
GetPixelInfo(*image,&fill);
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,&fill,
exception);
draw_info->fill=fill;
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
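/*
If the argument does not parse as a color, assume it names an image
and load it as the fill pattern; probe with a throw-away exception
so a failed color lookup is not reported.
*/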
sans=AcquireExceptionInfo();
status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
else
draw_info->fill=fill=color;
break;
}
if (LocaleCompare("flip",option+1) == 0)
{
/*
Flip image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=FlipImage(*image,exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
PixelInfo
target;
/*
Floodfill image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) QueryColorCompliance(argv[i+2],AllCompliance,&target,
exception);
(void) FloodfillPaintImage(*image,draw_info,&target,geometry.x,
geometry.y,*option == '-' ? MagickFalse : MagickTrue,exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
/*
Flop image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=FlopImage(*image,exception);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
break;
}
(void) CloneString(&draw_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
format=argv[i+1];
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
/*
Surround image with an ornamental border.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
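/*
The geometry width/height set the frame thickness and x/y the outer
and inner bevels; the framed canvas grows by twice the thickness in
each dimension.
*/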
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=(*image)->columns+2*frame_info.width;
frame_info.height=(*image)->rows+2*frame_info.height;
mogrify_image=FrameImage(*image,&frame_info,compose,exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
char
*arguments,
token[MagickPathExtent];
const char
*p;
double
*parameters;
MagickFunction
function;
register ssize_t
x;
size_t
number_parameters;
/*
Apply a function to modify image values.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
function=(MagickFunction) ParseCommandOption(MagickFunctionOptions,
MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (arguments == (char *) NULL)
break;
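/*
Count the numeric parameters first, then allocate and convert them
(the same two-pass tokenizing used by -distort).
*/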
p=(char *) arguments;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_parameters=(size_t) x;
parameters=(double *) AcquireQuantumMemory(number_parameters,
sizeof(*parameters));
if (parameters == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(parameters,0,number_parameters*
sizeof(*parameters));
p=(char *) arguments;
for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
parameters[x]=StringToDouble(token,(char **) NULL);
}
arguments=DestroyString(arguments);
(void) FunctionImage(*image,function,number_parameters,parameters,
exception);
parameters=(double *) RelinquishMagickMemory(parameters);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
/*
Gamma image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
(*image)->gamma=StringToDouble(argv[i+1],(char **) NULL);
else
(void) GammaImage(*image,StringToDouble(argv[i+1],(char **) NULL),
exception);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=GaussianBlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record the image offset or resize the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
if ((*image)->geometry != (char *) NULL)
(*image)->geometry=DestroyString((*image)->geometry);
break;
}
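/*
A geometry with an offset merely records the composite position in
the image; a bare widthxheight resizes the image.
*/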
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&(*image)->geometry,argv[i+1]);
else
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,exception);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
draw_info->gravity=UndefinedGravity;
break;
}
draw_info->gravity=(GravityType) ParseCommandOption(
MagickGravityOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
PixelIntensityMethod
method;
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,argv[i+1]);
(void) GrayscaleImage(*image,method,exception);
break;
}
break;
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]);
break;
}
if (LocaleCompare("hough-lines",option+1) == 0)
{
/*
Identify lines in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
char
*text;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (format == (char *) NULL)
{
(void) IdentifyImage(*image,stdout,mogrify_info->verbose,
exception);
break;
}
text=InterpretImageProperties(mogrify_info,*image,format,
exception);
if (text == (char *) NULL)
break;
(void) fputs(text,stdout);
text=DestroyString(text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
/*
Implode image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=ImplodeImage(*image,geometry_info.rho,
interpolate_method,exception);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interline_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
interpolate_method=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interword_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/*
Interpolative resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=InterpolativeResizeImage(*image,geometry.width,
geometry.height,interpolate_method,exception);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->kerning=geometry_info.rho;
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
mogrify_image=KuwaharaImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
/*
Local adaptive threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=AdaptiveThresholdImage(*image,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,(double)
geometry_info.xi,exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
double
black_point,
gamma,
white_point;
/*
Parse levels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
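/*
rho is the black point, sigma the white point (defaulting to
QuantumRange minus the black point), and xi the gamma (default 1.0);
percent values are scaled to the quantum range.  A '+' option or a
'!' flag applies the inverse mapping via LevelizeImage().
*/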
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(double) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (QuantumRange/100.0);
white_point*=(double) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(double) QuantumRange-black_point;
if ((*option == '+') || ((flags & AspectValue) != 0))
(void) LevelizeImage(*image,black_point,white_point,gamma,
exception);
else
(void) LevelImage(*image,black_point,white_point,gamma,
exception);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
PixelInfo
black_point,
white_point;
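/*
Parse one or two colors: the first is the black point (default
black), the second the white point (default white).  A single color
levels both points to that color.
*/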
p=(const char *) argv[i+1];
GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&black_point,exception);
else
(void) QueryColorCompliance("#000000",AllCompliance,
&black_point,exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&white_point,exception);
else
(void) QueryColorCompliance("#ffffff",AllCompliance,
&white_point,exception);
}
(void) LevelImageColors(*image,&black_point,&white_point,
*option == '+' ? MagickTrue : MagickFalse,exception);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(double) (*image)->columns*(*image)->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(double) (*image)->columns*(*image)->rows-
black_point;
(void) LinearStretchImage(*image,black_point,white_point,exception);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/*
Liquid rescale image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
mogrify_image=LiquidRescaleImage(*image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
mogrify_image=LocalContrastImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
/*
Double image size.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=MagnifyImage(*image,exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image,exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
(void) SetImageAlphaChannel(*image,(*option == '-') ?
SetAlphaChannel : DeactivateAlphaChannel,exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
/*
Delineate arbitrarily shaped clusters in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
/*
Median filter image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,MedianStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/*
Mode image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,ModeStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ModulateImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:moments");
break;
}
(void) SetImageArtifact(*image,"identify:moments",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageProgressMonitor(*image,
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
(void) SetImageProgressMonitor(*image,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) SetImageType(*image,BilevelType,exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
KernelInfo
*kernel;
MorphologyMethod
method;
ssize_t
iterations;
/*
Morphological image operation.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
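/*
The first argument is parsed as "method[:iterations]" (a comma also
separates); by ImageMagick convention a negative iteration count
means iterate until the image no longer changes.
*/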
p=argv[i+1];
GetNextToken(p,&p,MagickPathExtent,token);
method=(MorphologyMethod) ParseCommandOption(
MagickMorphologyOptions,MagickFalse,token);
iterations=1L;
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MagickPathExtent,token);
if (*p != '\0')
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(argv[i+2],exception);
if (kernel == (KernelInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnabletoParseKernel","morphology");
status=MagickFalse;
break;
}
mogrify_image=MorphologyImage(*image,method,iterations,kernel,
exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("motion-blur",option+1) == 0)
{
/*
Motion blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=MotionBlurImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
break;
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) NegateImage(*image,*option == '+' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
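/*
'-noise radius' reduces noise with a nonpeak statistic filter;
'+noise type' adds noise of the given type, scaled by the current
attenuate setting.
*/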
if (*option == '-')
{
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,NonpeakStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
}
else
{
NoiseType
noise;
noise=(NoiseType) ParseCommandOption(MagickNoiseOptions,
MagickFalse,argv[i+1]);
mogrify_image=AddNoiseImage(*image,noise,attenuate,exception);
}
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) NormalizeImage(*image,exception);
break;
}
break;
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
PixelInfo
target;
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) QueryColorCompliance(argv[i+1],AllCompliance,&target,
exception);
(void) OpaquePaintImage(*image,&target,&fill,*option == '-' ?
MagickFalse : MagickTrue,exception);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) OrderedDitherImage(*image,argv[i+1],exception);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=OilPaintImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
/*
Perceptible image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) PerceptibleImage(*image,StringToDouble(argv[i+1],
(char **) NULL),exception);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("12",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
const char
*caption;
double
angle;
RandomInfo
*random_info;
/*
Simulate a Polaroid picture.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
if (*option == '-')
{
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
angle=geometry_info.rho;
}
caption=GetImageProperty(*image,"caption",exception);
mogrify_image=PolaroidImage(*image,draw_info,caption,angle,
interpolate_method,exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
/*
Posterize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]),
quantize_info->dither_method,exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
PreviewType
preview_type;
/*
Preview image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
preview_type=UndefinedPreview;
else
preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
mogrify_image=PreviewImage(*image,preview_type,exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a profile from the image.
*/
(void) ProfileImage(*image,argv[i+1],(const unsigned char *)
NULL,0,exception);
break;
}
/*
Associate a profile with the image.
*/
profile_info=CloneImageInfo(mogrify_info);
profile=GetImageProfile(*image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,argv[i+1],exception);
profile_info=DestroyImageInfo(profile_info);
if (profile_image == (Image *) NULL)
{
StringInfo
*file_data;
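/*
The argument is not an image: read it as a raw blob and apply it as
a profile named by the format SetImageInfo() detects from the
filename.
*/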
profile_info=CloneImageInfo(mogrify_info);
(void) CopyMagickString(profile_info->filename,argv[i+1],
MagickPathExtent);
file_data=FileToStringInfo(profile_info->filename,~0UL,
exception);
if (file_data != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,exception);
(void) ProfileImage(*image,profile_info->magick,
GetStringInfoDatum(file_data),
GetStringInfoLength(file_data),exception);
file_data=DestroyStringInfo(file_data);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(*image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),exception);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quantize",option+1) == 0)
{
if (*option == '+')
{
quantize_info->colorspace=UndefinedColorspace;
break;
}
quantize_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("rotational-blur",option+1) == 0)
{
/*
Rotational blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=RotationalBlurImage(*image,geometry_info.rho,
exception);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
/*
Surround image with a raise of solid color.
*/
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
/*
Random threshold image.
*/
double
min_threshold,
max_threshold;
(void) SyncImageSettings(mogrify_info,*image,exception);
min_threshold=0.0;
max_threshold=(double) QuantumRange;
flags=ParseGeometry(argv[i+1],&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(argv[i+1],'%') != (char *) NULL)
{
max_threshold*=(double) (0.01*QuantumRange);
min_threshold*=(double) (0.01*QuantumRange);
}
(void) RandomThresholdImage(*image,min_threshold,max_threshold,
exception);
break;
}
if (LocaleCompare("range-threshold",option+1) == 0)
{
/*
Range threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=geometry_info.sigma;
if ((flags & PsiValue) == 0)
geometry_info.psi=geometry_info.xi;
if (strchr(argv[i+1],'%') != (char *) NULL)
{
geometry_info.rho*=(double) (0.01*QuantumRange);
geometry_info.sigma*=(double) (0.01*QuantumRange);
geometry_info.xi*=(double) (0.01*QuantumRange);
geometry_info.psi*=(double) (0.01*QuantumRange);
}
(void) RangeThresholdImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,ReadPixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,ReadPixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("region",option+1) == 0)
{
/*
Apply a write mask as defined by a region geometry.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageRegionMask(*image,WritePixelMask,
(const RectangleInfo *) NULL,exception);
break;
}
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
(void) SetImageRegionMask(*image,WritePixelMask,&geometry,
exception);
break;
}
if (LocaleCompare("render",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
draw_info->render=(*option == '+') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image,exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
{
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
break;
}
(void) ResetImagePage(*image,argv[i+1]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/*
Resample image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ResampleImage(*image,geometry_info.rho,
geometry_info.sigma,(*image)->filter,exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
/*
Roll image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) (*image)->columns/100.0;
geometry.y*=(double) (*image)->rows/100.0;
}
mogrify_image=RollImage(*image,geometry.x,geometry.y,exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
char
*rotation;
/*
Check for conditional image rotation.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
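/*
A '>' in the argument rotates only landscape images (width > height);
a '<' rotates only portrait images.
*/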
if (strchr(argv[i+1],'>') != (char *) NULL)
if ((*image)->columns <= (*image)->rows)
break;
if (strchr(argv[i+1],'<') != (char *) NULL)
if ((*image)->columns >= (*image)->rows)
break;
/*
Rotate image.
*/
rotation=ConstantString(argv[i+1]);
(void) SubstituteString(&rotation,">","");
(void) SubstituteString(&rotation,"<","");
(void) ParseGeometry(rotation,&geometry_info);
rotation=DestroyString(rotation);
mogrify_image=RotateImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/*
Sample image with pixel replication.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SampleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/*
Scale image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ScaleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
/*
Selectively blur pixels within a contrast threshold.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=SelectiveBlurImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/*
Break channels into separate images.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=SeparateImages(*image,exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
double
threshold;
/*
Sepia-tone image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=SepiaToneImage(*image,threshold,exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
/*
Segment image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(*image,(*image)->colorspace,
mogrify_info->verbose,geometry_info.rho,geometry_info.sigma,
exception);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
char
*value;
/*
Set image option.
*/
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) DeleteImageOption(mogrify_info,argv[i+1]+7);
(void) DeleteImageArtifact(*image,argv[i+1]+7);
}
else
(void) DeleteImageProperty(*image,argv[i+1]);
break;
}
value=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (value == (char *) NULL)
break;
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value,
exception);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) SetImageOption(image_info,argv[i+1]+7,value);
(void) SetImageOption(mogrify_info,argv[i+1]+7,value);
(void) SetImageArtifact(*image,argv[i+1]+7,value);
}
else
(void) SetImageProperty(*image,argv[i+1],value,exception);
value=DestroyString(value);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
/*
Shade image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
/*
Shadow image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
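/*
rho sets the shadow opacity (a percentage), sigma the blur sigma,
and xi/psi the horizontal and vertical offsets, rounded to the
nearest integer.
*/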
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
mogrify_image=ShadowImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),
(ssize_t) ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
/*
Sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
mogrify_image=SharpenImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
/*
Shave the image edges.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ShaveImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
/*
Shear image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ShearImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
/*
Sigmoidal non-linearity contrast control.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
(void) SigmoidalContrastImage(*image,(*option == '-') ?
MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,
exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
/*
Sketch image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SketchImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
double
threshold;
(void) SyncImageSettings(mogrify_info,*image,exception);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) SolarizeImage(*image,threshold,exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
SparseColorMethod
method;
char
*arguments;
/*
Sparse-color interpolated gradient.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(SparseColorMethod) ParseCommandOption(
MagickSparseColorOptions,MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (arguments == (char *) NULL)
break;
mogrify_image=SparseColorOption(*image,method,arguments,
option[0] == '+' ? MagickTrue : MagickFalse,exception);
arguments=DestroyString(arguments);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
/*
Splice a solid color into the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SpliceImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
/*
Spread an image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SpreadImage(*image,interpolate_method,
geometry_info.rho,exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
StatisticType
type;
(void) SyncImageSettings(mogrify_info,*image,exception);
type=(StatisticType) ParseCommandOption(MagickStatisticOptions,
MagickFalse,argv[i+1]);
(void) ParseGeometry(argv[i+2],&geometry_info);
mogrify_image=StatisticImage(*image,type,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
if (*option == '+')
{
draw_info->stretch=UndefinedStretch;
break;
}
draw_info->stretch=(StretchType) ParseCommandOption(
MagickStretchOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
/*
Strip image of profiles and comments.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) StripImage(*image,exception);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
ExceptionInfo
*sans;
PixelInfo
color;
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,
&draw_info->stroke,exception);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(
draw_info->stroke_pattern);
break;
}
sans=AcquireExceptionInfo();
status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
else
draw_info->stroke=color;
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
{
draw_info->style=UndefinedStyle;
break;
}
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
/*
Swirl image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SwirlImage(*image,geometry_info.rho,
interpolate_method,exception);
break;
}
break;
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
threshold=(double) QuantumRange/2;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) BilevelImage(*image,threshold,exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
/*
Thumbnail image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
/*
Tint the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TintImage(*image,argv[i+1],&fill,exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
/*
Affine transform image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=AffineTransformImage(*image,&draw_info->affine,
exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
PixelInfo
target;
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) QueryColorCompliance(argv[i+1],AllCompliance,&target,
exception);
(void) TransparentPaintImage(*image,&target,(Quantum)
TransparentAlpha,*option == '-' ? MagickFalse : MagickTrue,
exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
/*
Transpose image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TransposeImage(*image,exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
/*
Transverse image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TransverseImage(*image,exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
/*
Trim image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TrimImage(*image,exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
ImageType
type;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
type=UndefinedType;
else
type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
argv[i+1]);
(*image)->type=UndefinedType;
(void) SetImageType(*image,type,exception);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->undercolor,exception);
break;
}
if (LocaleCompare("unique",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(*image,"identify:unique-colors","true");
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
/*
Unique image colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=UniqueImageColors(*image,exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
/*
Unsharp mask image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
mogrify_image=UnsharpMaskImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,
exception);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
(void) SetImageArtifact(*image,option+1,
*option == '+' ? "false" : "true");
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
/*
Vignette image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*(*image)->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*(*image)->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) (*image)->columns/100.0;
geometry_info.psi*=(double) (*image)->rows/100.0;
}
mogrify_image=VignetteImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),
(ssize_t) ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageVirtualPixelMethod(*image,
UndefinedVirtualPixelMethod,exception);
break;
}
(void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]),exception);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
/*
Wave image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=WaveImage(*image,geometry_info.rho,
geometry_info.sigma,interpolate_method,exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
/*
Wavelet denoise image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=(double) QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
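/*
Accept a named weight (e.g. "bold"); otherwise fall back to a
numeric weight value.
*/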
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,
argv[i+1]);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(argv[i+1]);
draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
/*
White threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) WhiteThresholdImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
break;
}
default:
break;
}
/*
Replace the current image with any image that was generated.
*/
if (mogrify_image != (Image *) NULL)
ReplaceImageInListReturnLast(image,mogrify_image);
i+=count;
}
/*
Free resources.
*/
quantize_info=DestroyQuantizeInfo(quantize_info);
draw_info=DestroyDrawInfo(draw_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageCommand() transforms an image or a sequence of images. These
% transforms include image scaling, image rotation, color reduction, and
% others. The transmogrified image overwrites the original image.
%
% The format of the MogrifyImageCommand method is:
%
% MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc,
% const char **argv,char **metadata,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: A text array containing the command line arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType MogrifyUsage(void)
{
static const char
channel_operators[] =
" -channel-fx expression\n"
" exchange, extract, or transfer one or more image channels\n"
" -separate separate an image channel into a grayscale image",
miscellaneous[] =
" -debug events display copious debugging information\n"
" -distribute-cache port\n"
" distributed pixel cache spanning one or more servers\n"
" -help print program options\n"
" -list type print a list of supported option arguments\n"
" -log format format of debugging information\n"
" -version print version information",
operators[] =
" -adaptive-blur geometry\n"
" adaptively blur pixels; decrease effect near edges\n"
" -adaptive-resize geometry\n"
" adaptively resize image using 'mesh' interpolation\n"
" -adaptive-sharpen geometry\n"
" adaptively sharpen pixels; increase effect near edges\n"
" -alpha option on, activate, off, deactivate, set, opaque, copy\n"
" transparent, extract, background, or shape\n"
" -annotate geometry text\n"
" annotate the image with text\n"
" -auto-gamma automagically adjust gamma level of image\n"
" -auto-level automagically adjust color levels of image\n"
" -auto-orient automagically orient (rotate) image\n"
" -auto-threshold method\n"
" automatically perform image thresholding\n"
" -bench iterations measure performance\n"
" -black-threshold value\n"
" force all pixels below the threshold into black\n"
" -blue-shift simulate a scene at nighttime in the moonlight\n"
" -blur geometry reduce image noise and reduce detail levels\n"
" -border geometry surround image with a border of color\n"
" -bordercolor color border color\n"
" -brightness-contrast geometry\n"
" improve brightness / contrast of the image\n"
" -canny geometry detect edges in the image\n"
" -cdl filename color correct with a color decision list\n"
" -channel mask set the image channel mask\n"
" -charcoal geometry simulate a charcoal drawing\n"
" -chop geometry remove pixels from the image interior\n"
" -clahe geometry contrast limited adaptive histogram equalization\n"
" -clamp keep pixel values in range (0-QuantumRange)\n"
" -clip clip along the first path from the 8BIM profile\n"
" -clip-mask filename associate a clip mask with the image\n"
" -clip-path id clip along a named path from the 8BIM profile\n"
" -colorize value colorize the image with the fill color\n"
" -color-matrix matrix apply color correction to the image\n"
" -connected-components connectivity\n"
" connected-components uniquely labeled\n"
" -contrast enhance or reduce the image contrast\n"
" -contrast-stretch geometry\n"
" improve contrast by 'stretching' the intensity range\n"
" -convolve coefficients\n"
" apply a convolution kernel to the image\n"
" -cycle amount cycle the image colormap\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -deskew threshold straighten an image\n"
" -despeckle reduce the speckles within an image\n"
" -distort method args\n"
" distort images according to given method ad args\n"
" -draw string annotate the image with a graphic primitive\n"
" -edge radius apply a filter to detect edges in the image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -emboss radius emboss an image\n"
" -enhance apply a digital filter to enhance a noisy image\n"
" -equalize perform histogram equalization to an image\n"
" -evaluate operator value\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -extent geometry set the image size\n"
" -extract geometry extract area from image\n"
" -fft implements the discrete Fourier transform (DFT)\n"
" -flip flip image vertically\n"
" -floodfill geometry color\n"
" floodfill the image with color\n"
" -flop flop image horizontally\n"
" -frame geometry surround image with an ornamental border\n"
" -function name parameters\n"
" apply function over image values\n"
" -gamma value level of gamma correction\n"
" -gaussian-blur geometry\n"
" reduce image noise and reduce detail levels\n"
" -geometry geometry preferred size or location of the image\n"
" -grayscale method convert image to grayscale\n"
" -hough-lines geometry\n"
" identify lines in the image\n"
" -identify identify the format and characteristics of the image\n"
" -ift implements the inverse discrete Fourier transform (DFT)\n"
" -implode amount implode image pixels about the center\n"
" -interpolative-resize geometry\n"
" resize image using interpolation\n"
" -kuwahara geometry edge preserving noise reduction filter\n"
" -lat geometry local adaptive thresholding\n"
" -level value adjust the level of image contrast\n"
" -level-colors color,color\n"
" level image with the given colors\n"
" -linear-stretch geometry\n"
" improve contrast by 'stretching with saturation'\n"
" -liquid-rescale geometry\n"
" rescale image with seam-carving\n"
" -local-contrast geometry\n"
" enhance local contrast\n"
" -magnify double the size of the image with pixel art scaling\n"
" -mean-shift geometry delineate arbitrarily shaped clusters in the image\n"
" -median geometry apply a median filter to the image\n"
" -mode geometry make each pixel the 'predominant color' of the\n"
" neighborhood\n"
" -modulate value vary the brightness, saturation, and hue\n"
" -monochrome transform image to black and white\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -motion-blur geometry\n"
" simulate motion blur\n"
" -negate replace every pixel with its complementary color \n"
" -noise geometry add or reduce noise in an image\n"
" -normalize transform image to span the full range of colors\n"
" -opaque color change this color to the fill color\n"
" -ordered-dither NxN\n"
" add a noise pattern to the image with specific\n"
" amplitudes\n"
" -paint radius simulate an oil painting\n"
" -perceptible epsilon\n"
" pixel value less than |epsilon| become epsilon or\n"
" -epsilon\n"
" -polaroid angle simulate a Polaroid picture\n"
" -posterize levels reduce the image to a limited number of color levels\n"
" -profile filename add, delete, or apply an image profile\n"
" -quantize colorspace reduce colors in this colorspace\n"
" -raise value lighten/darken image edges to create a 3-D effect\n"
" -random-threshold low,high\n"
" random threshold the image\n"
" -range-threshold values\n"
" perform either hard or soft thresholding within some range of values in an image\n"
" -region geometry apply options to a portion of the image\n"
" -render render vector graphics\n"
" -repage geometry size and location of an image canvas\n"
" -resample geometry change the resolution of an image\n"
" -resize geometry resize the image\n"
" -roll geometry roll an image vertically or horizontally\n"
" -rotate degrees apply Paeth rotation to the image\n"
" -rotational-blur angle\n"
" rotational blur the image\n"
" -sample geometry scale image with pixel sampling\n"
" -scale geometry scale the image\n"
" -segment values segment an image\n"
" -selective-blur geometry\n"
" selectively blur pixels within a contrast threshold\n"
" -sepia-tone threshold\n"
" simulate a sepia-toned photo\n"
" -set property value set an image property\n"
" -shade degrees shade the image using a distant light source\n"
" -shadow geometry simulate an image shadow\n"
" -sharpen geometry sharpen the image\n"
" -shave geometry shave pixels from the image edges\n"
" -shear geometry slide one edge of the image along the X or Y axis\n"
" -sigmoidal-contrast geometry\n"
" increase the contrast without saturating highlights or\n"
" shadows\n"
" -sketch geometry simulate a pencil sketch\n"
" -solarize threshold negate all pixels above the threshold level\n"
" -sparse-color method args\n"
" fill in a image based on a few color points\n"
" -splice geometry splice the background color into the image\n"
" -spread radius displace image pixels by a random amount\n"
" -statistic type radius\n"
" replace each pixel with corresponding statistic from the neighborhood\n"
" -strip strip image of all profiles and comments\n"
" -swirl degrees swirl image pixels about the center\n"
" -threshold value threshold the image\n"
" -thumbnail geometry create a thumbnail of the image\n"
" -tile filename tile image when filling a graphic primitive\n"
" -tint value tint the image with the fill color\n"
" -transform affine transform image\n"
" -transparent color make this color transparent within the image\n"
" -transpose flip image vertically and rotate 90 degrees\n"
" -transverse flop image horizontally and rotate 270 degrees\n"
" -trim trim image edges\n"
" -type type image type\n"
" -unique-colors discard all but one of any pixel color\n"
" -unsharp geometry sharpen the image\n"
" -vignette geometry soften the edges of the image in vignette style\n"
" -wave geometry alter an image along a sine wave\n"
" -wavelet-denoise threshold\n"
" removes noise from the image using a wavelet transform\n"
" -white-threshold value\n"
" force all pixels above the threshold into white",
sequence_operators[] =
" -affinity filename transform image colors to match this set of colors\n"
" -append append an image sequence\n"
" -clut apply a color lookup table to the image\n"
" -coalesce merge a sequence of images\n"
" -combine combine a sequence of images\n"
" -compare mathematically and visually annotate the difference between an image and its reconstruction\n"
" -complex operator perform complex mathematics on an image sequence\n"
" -composite composite image\n"
" -copy geometry offset\n"
" copy pixels from one area of an image to another\n"
" -crop geometry cut out a rectangular region of the image\n"
" -deconstruct break down an image sequence into constituent parts\n"
" -evaluate-sequence operator\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -flatten flatten a sequence of images\n"
" -fx expression apply mathematical expression to an image channel(s)\n"
" -hald-clut apply a Hald color lookup table to the image\n"
" -layers method optimize, merge, or compare image layers\n"
" -morph value morph an image sequence\n"
" -mosaic create a mosaic from an image sequence\n"
" -poly terms build a polynomial from the image sequence and the corresponding\n"
" terms (coefficients and degree pairs).\n"
" -print string interpret string and print to console\n"
" -process arguments process the image with a custom image filter\n"
" -smush geometry smush an image sequence together\n"
" -write filename write images to this file",
settings[] =
" -adjoin join images into a single multi-image file\n"
" -affine matrix affine transform matrix\n"
" -alpha option activate, deactivate, reset, or set the alpha channel\n"
" -antialias remove pixel-aliasing\n"
" -authenticate password\n"
" decipher image with this password\n"
" -attenuate value lessen (or intensify) when adding noise to an image\n"
" -background color background color\n"
" -bias value add bias when convolving an image\n"
" -black-point-compensation\n"
" use black point compensation\n"
" -blue-primary point chromaticity blue primary point\n"
" -bordercolor color border color\n"
" -caption string assign a caption to an image\n"
" -colors value preferred number of colors in the image\n"
" -colorspace type alternate image colorspace\n"
" -comment string annotate image with comment\n"
" -compose operator set image composite operator\n"
" -compress type type of pixel compression when writing the image\n"
" -define format:option=value\n"
" define one or more image format options\n"
" -delay value display the next image after pausing\n"
" -density geometry horizontal and vertical density of the image\n"
" -depth value image depth\n"
" -direction type render text right-to-left or left-to-right\n"
" -display server get image or font from this X server\n"
" -dispose method layer disposal method\n"
" -dither method apply error diffusion to image\n"
" -encoding type text encoding type\n"
" -endian type endianness (MSB or LSB) of the image\n"
" -family name render text with this font family\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fill color color to use when filling a graphic primitive\n"
" -filter type use this filter when resizing an image\n"
" -font name render text with this font\n"
" -format \"string\" output formatted image characteristics\n"
" -fuzz distance colors within this distance are considered equal\n"
" -gravity type horizontal and vertical text placement\n"
" -green-primary point chromaticity green primary point\n"
" -intensity method method to generate an intensity value from a pixel\n"
" -intent type type of rendering intent when managing the image color\n"
" -interlace type type of image interlacing scheme\n"
" -interline-spacing value\n"
" set the space between two text lines\n"
" -interpolate method pixel color interpolation method\n"
" -interword-spacing value\n"
" set the space between two words\n"
" -kerning value set the space between two letters\n"
" -label string assign a label to an image\n"
" -limit type value pixel cache resource limit\n"
" -loop iterations add Netscape loop extension to your GIF animation\n"
" -matte store matte channel if the image has one\n"
" -mattecolor color frame color\n"
" -monitor monitor progress\n"
" -orient type image orientation\n"
" -page geometry size and location of an image canvas (setting)\n"
" -path path write images to this path on disk\n"
" -ping efficiently determine image attributes\n"
" -pointsize value font point size\n"
" -precision value maximum number of significant digits to print\n"
" -preview type image preview type\n"
" -quality value JPEG/MIFF/PNG compression level\n"
" -quiet suppress all warning messages\n"
" -read-mask filename associate a read mask with the image\n"
" -red-primary point chromaticity red primary point\n"
" -regard-warnings pay attention to warning messages\n"
" -remap filename transform image colors to match this set of colors\n"
" -respect-parentheses settings remain in effect until parenthesis boundary\n"
" -sampling-factor geometry\n"
" horizontal and vertical sampling factor\n"
" -scene value image scene number\n"
" -seed value seed a new sequence of pseudo-random numbers\n"
" -size geometry width and height of image\n"
" -stretch type render text with this font stretch\n"
" -stroke color graphic primitive stroke color\n"
" -strokewidth value graphic primitive stroke width\n"
" -style type render text with this font style\n"
" -synchronize synchronize image to storage device\n"
" -taint declare the image as modified\n"
" -texture filename name of texture to tile onto the image background\n"
" -tile-offset geometry\n"
" tile offset\n"
" -treedepth value color tree depth\n"
" -transparent-color color\n"
" transparent color\n"
" -undercolor color annotation bounding box color\n"
" -units type the units of image resolution\n"
" -verbose print detailed information about the image\n"
" -view FlashPix viewing transforms\n"
" -virtual-pixel method\n"
" virtual pixel access method\n"
" -weight type render text with this font weight\n"
" -white-point point chromaticity white point\n"
" -write-mask filename associate a write mask with the image",
stack_operators[] =
" -delete indexes delete the image from the image sequence\n"
" -duplicate count,indexes\n"
" duplicate an image one or more times\n"
" -insert index insert last image into the image sequence\n"
" -reverse reverse image sequence\n"
" -swap indexes swap two images in the image sequence";
ListMagickVersion(stdout);
(void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n",
GetClientName());
(void) printf("\nImage Settings:\n");
(void) puts(settings);
(void) printf("\nImage Operators:\n");
(void) puts(operators);
(void) printf("\nImage Channel Operators:\n");
(void) puts(channel_operators);
(void) printf("\nImage Sequence Operators:\n");
(void) puts(sequence_operators);
(void) printf("\nImage Stack Operators:\n");
(void) puts(stack_operators);
(void) printf("\nMiscellaneous Options:\n");
(void) puts(miscellaneous);
(void) printf(
"\nBy default, the image format of 'file' is determined by its magic\n");
(void) printf(
"number. To specify a particular image format, precede the filename\n");
(void) printf(
"with an image format name and a colon (i.e. ps:image) or specify the\n");
(void) printf(
"image type as the filename suffix (i.e. image.ps). Specify 'file' as\n");
(void) printf("'-' for standard input or output.\n");
return(MagickFalse);
}
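/*
  MogrifyImageCommand() reads each image named on the command line, applies
  the requested options, and writes the result back over the original file
  (after attempting a '~'-suffixed backup).  The loop below only validates
  option arguments; the options themselves are applied when the image stack
  is fired.
*/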
WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,
int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception)
{
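/*
  Helper macros: DestroyMogrify() releases the format and path strings, the
  image stack, and the expanded argument vector; the Throw* macros log an
  exception, clean up, and return MagickFalse.
*/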
#define DestroyMogrify() \
{ \
if (format != (char *) NULL) \
format=DestroyString(format); \
if (path != (char *) NULL) \
path=DestroyString(path); \
DestroyImageStack(); \
for (i=0; i < (ssize_t) argc; i++) \
argv[i]=DestroyString(argv[i]); \
argv=(char **) RelinquishMagickMemory(argv); \
}
#define ThrowMogrifyException(asperity,tag,option) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \
option); \
DestroyMogrify(); \
return(MagickFalse); \
}
#define ThrowMogrifyInvalidArgumentException(option,argument) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),OptionError, \
"InvalidArgument","'%s': %s",argument,option); \
DestroyMogrify(); \
return(MagickFalse); \
}
char
*format,
*option,
*path;
Image
*image;
ImageStack
image_stack[MaxImageStackDepth+1];
  MagickBooleanType
    fire,
    global_colormap,
    pend,
    respect_parenthesis;
MagickStatusType
status;
register ssize_t
i;
ssize_t
j,
k;
wand_unreferenced(metadata);
/*
Set defaults.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(exception != (ExceptionInfo *) NULL);
if (argc == 2)
{
option=argv[1];
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
return(MagickTrue);
}
}
if (argc < 2)
return(MogrifyUsage());
format=(char *) NULL;
path=(char *) NULL;
global_colormap=MagickFalse;
k=0;
j=1;
NewImageStack();
option=(char *) NULL;
pend=MagickFalse;
respect_parenthesis=MagickFalse;
status=MagickTrue;
/*
Parse command line.
*/
ReadCommandlLine(argc,&argv);
status=ExpandFilenames(&argc,&argv);
if (status == MagickFalse)
ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
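  /*
    Walk the expanded argument vector: '(' and ')' scope the image stack,
    bare filenames are read (and later rewritten in place), and every other
    token is validated as an option in the switch below.
  */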
for (i=1; i < (ssize_t) argc; i++)
{
option=argv[i];
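    /*
      '(' opens a nested image list (bounded by MaxImageStackDepth); ')'
      fires any pending options and pops back to the enclosing list.
    */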
if (LocaleCompare(option,"(") == 0)
{
FireImageStack(MagickFalse,MagickTrue,pend);
if (k == MaxImageStackDepth)
ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply",
option);
PushImageStack();
continue;
}
if (LocaleCompare(option,")") == 0)
{
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
if (k == 0)
ThrowMogrifyException(OptionError,"UnableToParseExpression",option);
PopImageStack();
continue;
}
if (IsCommandOption(option) == MagickFalse)
{
char
backup_filename[MagickPathExtent],
*filename;
Image
*images;
struct stat
properties;
/*
Option is a file name: begin by reading image from specified file.
*/
FireImageStack(MagickFalse,MagickFalse,pend);
filename=argv[i];
if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1)))
filename=argv[++i];
images=ReadImages(image_info,filename,exception);
status&=(images != (Image *) NULL) &&
(exception->severity < ErrorException);
if (images == (Image *) NULL)
continue;
        properties=(*GetBlobProperties(images));  /* stat info for timestamp restore */
if (format != (char *) NULL)
(void) CopyMagickString(images->filename,images->magick_filename,
MagickPathExtent);
if (path != (char *) NULL)
{
GetPathComponent(option,TailPath,filename);
(void) FormatLocaleString(images->filename,MagickPathExtent,
"%s%c%s",path,*DirectorySeparator,filename);
}
if (format != (char *) NULL)
AppendImageFormat(format,images->filename);
AppendImageStack(images);
FinalizeImageSettings(image_info,image,MagickFalse);
if (global_colormap != MagickFalse)
{
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo(image_info);
(void) RemapImages(quantize_info,images,(Image *) NULL,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
*backup_filename='\0';
if ((LocaleCompare(image->filename,"-") != 0) &&
(IsPathWritable(image->filename) != MagickFalse))
{
            /*
              Rename the image file as a backup: append up to six '~'
              characters until an unused filename is found.  If every
              candidate exists or the rename fails, continue without a
              backup.
            */
(void) CopyMagickString(backup_filename,image->filename,
MagickPathExtent);
for (j=0; j < 6; j++)
{
(void) ConcatenateMagickString(backup_filename,"~",
MagickPathExtent);
if (IsPathAccessible(backup_filename) == MagickFalse)
break;
}
if ((IsPathAccessible(backup_filename) != MagickFalse) ||
(rename_utf8(image->filename,backup_filename) != 0))
*backup_filename='\0';
}
/*
Write transmogrified image to disk.
*/
image_info->synchronize=MagickTrue;
status&=WriteImages(image_info,image,image->filename,exception);
if (status != MagickFalse)
{
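            /*
              Honor -define preserve-timestamp=true: restore the access and
              modification times recorded when the file was read.
            */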
#if defined(MAGICKCORE_HAVE_UTIME)
{
MagickBooleanType
preserve_timestamp;
preserve_timestamp=IsStringTrue(GetImageOption(image_info,
"preserve-timestamp"));
if (preserve_timestamp != MagickFalse)
{
struct utimbuf
timestamp;
timestamp.actime=properties.st_atime;
timestamp.modtime=properties.st_mtime;
                (void) utime(image->filename,&timestamp);
}
}
#endif
if (*backup_filename != '\0')
(void) remove_utf8(backup_filename);
}
RemoveAllImageStack();
continue;
}
pend=image != (Image *) NULL ? MagickTrue : MagickFalse;
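    /*
      Option validation convention: the '-' form consumes and checks its
      argument(s); the '+' form resets the setting and takes none, hence the
      early break on '+' in most handlers below.
    */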
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,
argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedAlphaChannelOption",argv[i]);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
            i++;
            if (i == (ssize_t) argc)
              ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
break;
if (LocaleCompare("append",option+1) == 0)
break;
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
break;
if (LocaleCompare("auto-level",option+1) == 0)
break;
if (LocaleCompare("auto-orient",option+1) == 0)
break;
if (LocaleCompare("auto-threshold",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickAutoThresholdOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedThresholdMethod",
argv[i]);
break;
}
if (LocaleCompare("average",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
break;
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("canny",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParseChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("channel-fx",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParsePixelChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
break;
if (LocaleCompare("clip",option+1) == 0)
break;
if (LocaleCompare("clip-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("clut",option+1) == 0)
break;
if (LocaleCompare("coalesce",option+1) == 0)
break;
if (LocaleCompare("colorize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("compare",option+1) == 0)
break;
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("composite",option+1) == 0)
break;
if (LocaleCompare("compress",option+1) == 0)
{
ssize_t
compress;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
compress=ParseCommandOption(MagickCompressOptions,MagickFalse,
argv[i]);
if (compress < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageCompression",
argv[i]);
break;
}
if (LocaleCompare("concurrent",option+1) == 0)
break;
if (LocaleCompare("connected-components",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
break;
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("deconstruct",option+1) == 0)
break;
if (LocaleCompare("debug",option+1) == 0)
{
ssize_t
event;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]);
if (event < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEventType",
argv[i]);
(void) SetLogEventMask(argv[i]);
break;
}
if (LocaleCompare("define",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
const char
*define;
define=GetImageOption(image_info,argv[i]);
if (define == (const char *) NULL)
ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]);
break;
}
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
break;
if (LocaleCompare("dft",option+1) == 0)
break;
if (LocaleCompare("direction",option+1) == 0)
{
ssize_t
direction;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
argv[i]);
if (direction < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDirectionType",
argv[i]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
ssize_t
dispose;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
argv[i]);
if (dispose < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod",
argv[i]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod",
argv[i]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("duration",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
ssize_t
endian;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]);
if (endian < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEndianType",
argv[i]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
break;
if (LocaleCompare("equalize",option+1) == 0)
break;
if (LocaleCompare("evaluate",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
ssize_t
filter;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]);
if (filter < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageFilter",
argv[i]);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
break;
if (LocaleCompare("flip",option+1) == 0)
break;
if (LocaleCompare("flop",option+1) == 0)
break;
if (LocaleCompare("floodfill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
(void) CopyMagickString(argv[i]+1,"sans",MagickPathExtent);
(void) CloneString(&format,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&format,argv[i]);
(void) CopyMagickString(image_info->filename,format,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,":",
MagickPathExtent);
(void) SetImageInfo(image_info,0,exception);
if (*image_info->magick == '\0')
ThrowMogrifyException(OptionError,"UnrecognizedImageFormat",
format);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
ssize_t
gravity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
argv[i]);
if (gravity < 0)
ThrowMogrifyException(OptionError,"UnrecognizedGravityType",
argv[i]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod",
argv[i]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
break;
if ((LocaleCompare("help",option+1) == 0) ||
(LocaleCompare("-help",option+1) == 0))
return(MogrifyUsage());
if (LocaleCompare("hough-lines",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
break;
if (LocaleCompare("idft",option+1) == 0)
break;
if (LocaleCompare("implode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("intensity",option+1) == 0)
{
ssize_t
intensity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,argv[i]);
if (intensity < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedPixelIntensityMethod",argv[i]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
ssize_t
intent;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]);
if (intent < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntentType",
argv[i]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
ssize_t
interlace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse,
argv[i]);
if (interlace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType",
argv[i]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
ssize_t
interpolate;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse,
argv[i]);
if (interpolate < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod",
argv[i]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("lat",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
            break;
          }
if (LocaleCompare("layers",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod",
argv[i]);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
char
*p;
double
value;
ssize_t
resource;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
resource=ParseCommandOption(MagickResourceOptions,MagickFalse,
argv[i]);
if (resource < 0)
ThrowMogrifyException(OptionError,"UnrecognizedResourceType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
              value=StringToDouble(argv[i],&p);
              (void) value;  /* parsed only to validate the argument */
if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0))
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]);
if (list < 0)
ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]);
status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **)
argv+j,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
i++;
if ((i == (ssize_t) argc) ||
(strchr(argv[i],'%') == (char *) NULL))
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
break;
if (LocaleCompare("map",option+1) == 0)
{
global_colormap=(*option == '+') ? MagickTrue : MagickFalse;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("matte",option+1) == 0)
break;
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("maximum",option+1) == 0)
break;
if (LocaleCompare("mean-shift",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMetricType",
argv[i]);
break;
}
if (LocaleCompare("minimum",option+1) == 0)
break;
if (LocaleCompare("modulate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
break;
if (LocaleCompare("monochrome",option+1) == 0)
break;
if (LocaleCompare("morph",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
KernelInfo
*kernel_info;
ssize_t
op;
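            /*
              Two arguments: the method name (only the leading token of the
              argument is compared) and a kernel string that is validated by
              building and immediately discarding a KernelInfo.
            */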
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
GetNextToken(argv[i],(const char **) NULL,MagickPathExtent,token);
op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod",
token);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
break;
if (LocaleCompare("motion-blur",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
break;
if (LocaleCompare("noise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
ssize_t
noise;
noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,
argv[i]);
if (noise < 0)
ThrowMogrifyException(OptionError,"UnrecognizedNoiseType",
argv[i]);
break;
}
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("noop",option+1) == 0)
break;
if (LocaleCompare("normalize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("orient",option+1) == 0)
{
ssize_t
orientation;
orientation=UndefinedOrientation;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse,
argv[i]);
if (orientation < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("paint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("path",option+1) == 0)
{
(void) CloneString(&path,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&path,argv[i]);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("poly",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'r':
{
if (LocaleCompare("rotational-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("range-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("regard-warnings",option+1) == 0)
break;
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("render",option+1) == 0)
break;
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleNCompare("respect-parentheses",option+1,17) == 0)
{
respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("reverse",option+1) == 0)
break;
if (LocaleCompare("roll",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sampling-factor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("separate",option+1) == 0)
break;
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("smush",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStatisticType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
ssize_t
stretch;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,
argv[i]);
if (stretch < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
break;
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
ssize_t
style;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]);
if (style < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
break;
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transform",option+1) == 0)
break;
if (LocaleCompare("transpose",option+1) == 0)
break;
if (LocaleCompare("transverse",option+1) == 0)
break;
if (LocaleCompare("threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
break;
if (LocaleCompare("type",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageType",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
break;
if (LocaleCompare("units",option+1) == 0)
{
ssize_t
units;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
argv[i]);
if (units < 0)
ThrowMogrifyException(OptionError,"UnrecognizedUnitsType",
argv[i]);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedVirtualPixelMethod",argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("write",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case '?':
break;
default:
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) &
FireOptionFlag) == 0 ? MagickFalse : MagickTrue;
if (fire != MagickFalse)
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
}
if (k != 0)
ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]);
if (i != (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]);
DestroyMogrify();
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageInfo() applies image processing settings to the image as
% prescribed by command line options.
%
% The format of the MogrifyImageInfo method is:
%
% MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
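% A minimal usage sketch (the settings shown are illustrative; image_info
% and exception are assumed to be valid):
%
% const char *settings[] = { "-quality", "85", "-interlace", "Plane" };
%
% (void) MogrifyImageInfo(image_info,4,settings,exception);
%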
*/
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorCompliance(MogrifyBackgroundColor,
AllCompliance,&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->background_color,exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"convolve:bias","0.0");
break;
}
(void) SetImageOption(image_info,"convolve:bias",argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&image_info->border_color,exception);
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->border_color,exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
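/*
Set the pixel cache memory limit; the memory-map limit is kept at twice
the memory limit.
*/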
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],
100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
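/*
Some format escapes require the image pixels to be read; disable ping
(header-only) mode if any are present.
*/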
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(MogrifyAlphaColor,AllCompliance,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->matte_color,exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MagickPathExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
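/*
Merge any prior page geometry with the new request and canonicalize it
as WxH or WxH+X+Y.
*/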
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
flags=ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,
&image_info->transparent_color,exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->transparent_color,exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"undefined");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0.0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
i+=count;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageList() applies any command line options that might affect the
% entire image list (e.g. -append, -coalesce, etc.).
%
% The format of the MogrifyImageList method is:
%
% MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc,
% const char **argv,Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: a pointer to a text array containing the command line
% arguments.
%
% o images: pointer to pointer of the first image in image list.
%
% o exception: return any errors or warnings in this structure.
%
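% A minimal usage sketch (the option list is illustrative; image_info,
% images, and exception are assumed to be valid):
%
% const char *options[] = { "-coalesce" };
%
% (void) MogrifyImageList(image_info,1,options,&images,exception);
%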
*/
WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
PixelInterpolateMethod
interpolate_method;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
interpolate_method=UndefinedInterpolatePixel;
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images,exception);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
Image
*channel_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
channel_image=ChannelFxImage(*images,argv[i+1],exception);
if (channel_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=channel_image;
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) ClutImage(image,clut_image,interpolate_method,exception);
clut_image=DestroyImage(clut_image);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
ColorspaceType
colorspace;
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
colorspace=(*images)->colorspace;
if ((*images)->number_channels < GetImageListLength(*images))
colorspace=sRGBColorspace;
if (*option == '+')
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
combine_image=CombineImages(*images,colorspace,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(mogrify_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImages(image,reconstruct_image,metric,
&distortion,exception);
if (difference_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images,exception);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
const char
*value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*new_images,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
(void) SyncImageSettings(mogrify_info,*images,exception);
value=GetImageOption(mogrify_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(mogrify_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(mogrify_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(images);
source_image=RemoveFirstImageFromList(images);
if (source_image == (Image *) NULL)
break; /* FUTURE - produce Exception, rather than silent fail */
/* FUTURE: this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity,&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
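/*
For displace/distort composites, fold the mask into the source image
before applying the composite.
*/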
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
}
else
{
Image
*clone_image;
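/*
Apply the composite through the mask: composite the source onto the
canvas, copy the mask into the canvas alpha channel, then blend the
masked result over an unmodified clone.
*/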
clone_image=CloneImage(new_images,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,exception);
status&=CompositeImage(clone_image,new_images,
OverCompositeOp,clip_to_self,0,0,exception);
new_images=DestroyImageList(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
*images=DestroyImageList(*images);
*images=new_images;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images,exception);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
deconstruct_image=CompareImagesLayers(*images,CompareAnyLayer,
exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither_method=NoDitherMethod;
break;
}
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
AppendImageToList(images,duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images,exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images,exception);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images,exception);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
fx_image=FxImage(*images,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) HaldClutImage(image,hald_image,exception);
hald_image=DestroyImage(hald_image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
Implements the inverse discrete Fourier transform (DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
interpolate_method=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
LayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images,exception);
layers=(Image *) NULL;
method=(LayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImagesLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
General-purpose GIF animation optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images,exception);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MagickPathExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
            (void) SyncImagesSettings(mogrify_info,*images,exception);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1],
exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImage(*images,number_arguments >> 1,
arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
}
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images,exception);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1],
exception);
if (string == (char *) NULL)
break;
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
}
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images,exception);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
            if ((number_arguments > 1) &&
                (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*argument;
int
next,
token_status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
argument=argv[i+1];
token_info=AcquireTokenInfo();
token_status=Tokenizer(token_info,0,token,length,argument,"",
"=","\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (token_status == 0)
{
const char
*arg;
arg=(&(argument[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&arg,
exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images,exception);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
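            /*
              By default exchange the last two images in the list (indexes -1
              and -2); +swap keeps these defaults while -swap reads explicit
              indexes from its geometry argument.
            */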
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MagickPathExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images,exception);
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",
argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImages() applies image processing options to a sequence of images as
% prescribed by command line options.
%
%  The format of the MogrifyImages method is:
%
% MagickBooleanType MogrifyImages(ImageInfo *image_info,
% const MagickBooleanType post,const int argc,const char **argv,
%        Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o post: If true, post process image list operators otherwise pre-process.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
%    o images: pointer to a pointer to the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
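/*
  A minimal usage sketch (an illustration, not part of the original source;
  the option vector shown is hypothetical).  Callers such as the mogrify
  utility pass their parsed command line straight through:

    const char
      *options[] = { "-resize", "50%", "-strip" };

    Image
      *images = ReadImage(image_info, exception);

    if (MogrifyImages(image_info, MagickTrue, 3, options, &images,
          exception) == MagickFalse)
      CatchException(exception);
*/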
WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info,
const MagickBooleanType post,const int argc,const char **argv,
Image **images,ExceptionInfo *exception)
{
#define MogrifyImageTag "Mogrify/Image"
MagickStatusType
status;
MagickBooleanType
proceed;
size_t
n;
register ssize_t
i;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (images == (Image **) NULL)
return(MogrifyImage(image_info,argc,argv,images,exception));
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
(void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL,
(void *) NULL);
status=MagickTrue;
#if 0
(void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc,
post?"post":"pre");
#endif
/*
    Pre-process multi-image sequence operators.
*/
if (post == MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
/*
    For each image, process simple single image operators.
*/
i=0;
n=GetImageListLength(*images);
for ( ; ; )
{
#if 0
(void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
status&=MogrifyImage(image_info,argc,argv,images,exception);
    proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i,n);
    if (proceed == MagickFalse)
      break;
    if ((*images)->next == (Image *) NULL)
      break;
    *images=(*images)->next;
    i++;
  }
  assert(*images != (Image *) NULL);
#if 0
(void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
/*
    Post-process multi-image sequence operators.
*/
*images=GetFirstImageInList(*images);
if (post != MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP DDDD FFFFF %
% P P D D F %
% PPPP D D FFF %
% P D D F %
% P DDDD F %
% %
% %
% Read/Write Portable Document Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/delegate-private.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/signature.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/module.h"
/*
Define declarations.
*/
#if defined(MAGICKCORE_TIFF_DELEGATE)
#define CCITTParam "-1"
#else
#define CCITTParam "0"
#endif
/*
Forward declarations.
*/
static MagickBooleanType
WritePDFImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v o k e P D F D e l e g a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InvokePDFDelegate() executes the PDF interpreter with the specified command.
%
% The format of the InvokePDFDelegate method is:
%
% MagickBooleanType InvokePDFDelegate(const MagickBooleanType verbose,
%        const char *command,char *message,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o verbose: A value other than zero displays the command prior to
% executing it.
%
% o command: the address of a character string containing the command to
%      execute.
%
%    o message: a buffer that receives any error text reported by the
%      Ghostscript interpreter.
%
%    o exception: return any errors or warnings in this structure.
%
*/
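/*
  A hedged usage sketch (illustration only; the command string is a made-up
  Ghostscript invocation, not one generated by this coder):

    char
      message[MagickPathExtent] = "";

    if (InvokePDFDelegate(MagickTrue, "gs -q -dBATCH -dNOPAUSE "
          "-sDEVICE=pnmraw -sOutputFile=page.pnm document.pdf", message,
          exception) == MagickFalse)
      (void) fprintf(stderr, "delegate failed: %s\n", message);
*/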
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
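/*
  PDFDelegateMessage() is the stdio callback handed to the Ghostscript API;
  it appends each chunk of interpreter output to the caller-supplied string
  (the handle) so errors can be reported after the run.
*/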
static int MagickDLLCall PDFDelegateMessage(void *handle,const char *message,
int length)
{
char
**messages;
ssize_t
offset;
offset=0;
messages=(char **) handle;
  if (*messages == (char *) NULL)
    *messages=(char *) AcquireQuantumMemory((size_t) length+1,
      sizeof(**messages));
  else
    {
      offset=(ssize_t) strlen(*messages);
      *messages=(char *) ResizeQuantumMemory(*messages,(size_t) (offset+
        length+1),sizeof(**messages));
    }
  if (*messages == (char *) NULL)
    return(0);
  (void) memcpy(*messages+offset,message,(size_t) length);
  (*messages)[length+offset]='\0';
return(length);
}
#endif
static MagickBooleanType InvokePDFDelegate(const MagickBooleanType verbose,
const char *command,char *message,ExceptionInfo *exception)
{
int
status;
#define ExecuteGhostscriptCommand(command,status) \
{ \
status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \
exception); \
if (status == 0) \
return(MagickTrue); \
if (status < 0) \
return(MagickFalse); \
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \
"FailedToExecuteCommand","`%s' (%d)",command,status); \
return(MagickFalse); \
}
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
#define SetArgsStart(command,args_start) \
if (args_start == (const char *) NULL) \
{ \
if (*command != '"') \
args_start=strchr(command,' '); \
else \
{ \
args_start=strchr(command+1,'"'); \
if (args_start != (const char *) NULL) \
args_start++; \
} \
}
char
**argv,
*errors;
const char
*args_start = (const char *) NULL;
const GhostInfo
*ghost_info;
gs_main_instance
*interpreter;
gsapi_revision_t
revision;
int
argc,
code;
register ssize_t
i;
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
ghost_info=NTGhostscriptDLLVectors();
#else
GhostInfo
ghost_info_struct;
ghost_info=(&ghost_info_struct);
(void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct));
ghost_info_struct.delete_instance=(void (*)(gs_main_instance *))
gsapi_delete_instance;
ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit;
ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *))
gsapi_new_instance;
ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **))
gsapi_init_with_args;
ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int,
int *)) gsapi_run_string;
ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *,
int),int (*)(void *,const char *,int),int (*)(void *, const char *, int)))
gsapi_set_stdio;
ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision;
#endif
if (ghost_info == (GhostInfo *) NULL)
ExecuteGhostscriptCommand(command,status);
if ((ghost_info->revision)(&revision,sizeof(revision)) != 0)
revision.revision=0;
if (verbose != MagickFalse)
{
(void) fprintf(stdout,"[ghostscript library %.2f]",(double)
revision.revision/100.0);
SetArgsStart(command,args_start);
(void) fputs(args_start,stdout);
}
errors=(char *) NULL;
status=(ghost_info->new_instance)(&interpreter,(void *) &errors);
if (status < 0)
ExecuteGhostscriptCommand(command,status);
code=0;
argv=StringToArgv(command,&argc);
if (argv == (char **) NULL)
{
(ghost_info->delete_instance)(interpreter);
return(MagickFalse);
}
(void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *,
char *,int)) NULL,PDFDelegateMessage,PDFDelegateMessage);
status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1);
if (status == 0)
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n",
0,&code);
(ghost_info->exit)(interpreter);
(ghost_info->delete_instance)(interpreter);
for (i=0; i < (ssize_t) argc; i++)
argv[i]=DestroyString(argv[i]);
argv=(char **) RelinquishMagickMemory(argv);
if (status != 0)
{
SetArgsStart(command,args_start);
if (status == -101) /* quit */
(void) FormatLocaleString(message,MagickPathExtent,
"[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0,
args_start,errors);
else
{
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError,
"PDFDelegateFailed","`[ghostscript library %.2f]%s': %s",(double)
revision.revision/100.0,args_start,errors);
if (errors != (char *) NULL)
errors=DestroyString(errors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Ghostscript returns status %d, exit code %d",status,code);
return(MagickFalse);
}
}
if (errors != (char *) NULL)
errors=DestroyString(errors);
return(MagickTrue);
#else
ExecuteGhostscriptCommand(command,status);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P D F %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPDF() returns MagickTrue if the image format type, identified by the
% magick string, is PDF.
%
% The format of the IsPDF method is:
%
% MagickBooleanType IsPDF(const unsigned char *magick,const size_t offset)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o offset: Specifies the offset of the magick string.
%
*/
static MagickBooleanType IsPDF(const unsigned char *magick,const size_t offset)
{
if (offset < 5)
return(MagickFalse);
if (LocaleNCompare((const char *) magick,"%PDF-",5) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P D F I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPDFImage() reads a Portable Document Format image file and
% returns it. It allocates the memory necessary for the new Image structure
% and returns a pointer to the new image.
%
% The format of the ReadPDFImage method is:
%
% Image *ReadPDFImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
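/*
  ReadPDFImage() is static and is normally reached through ReadImage() after
  RegisterPDFImage() installs it as this coder's decoder.  A hedged sketch of
  that indirect path (the filename is hypothetical):

    ImageInfo
      *image_info = AcquireImageInfo();

    (void) CopyMagickString(image_info->filename, "document.pdf",
      MagickPathExtent);
    image = ReadImage(image_info, exception);

  IsPDFRendered() below reports whether a path names a non-empty regular
  file, i.e. whether Ghostscript actually produced a page image there.
*/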
static MagickBooleanType IsPDFRendered(const char *path)
{
MagickBooleanType
status;
struct stat
attributes;
if ((path == (const char *) NULL) || (*path == '\0'))
return(MagickFalse);
status=GetPathAttributes(path,&attributes);
if ((status != MagickFalse) && S_ISREG(attributes.st_mode) &&
(attributes.st_size > 0))
return(MagickTrue);
return(MagickFalse);
}
static Image *ReadPDFImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define BeginXMPPacket "<?xpacket begin="
#define CMYKProcessColor "CMYKProcessColor"
#define CropBox "CropBox"
#define DefaultCMYK "DefaultCMYK"
#define DeviceCMYK "DeviceCMYK"
#define EndXMPPacket "<?xpacket end="
#define MediaBox "MediaBox"
#define RenderPostscriptText "Rendering Postscript... "
#define PDFRotate "Rotate"
#define SpotColor "Separation"
#define TrimBox "TrimBox"
#define PDFVersion "PDF-"
char
command[MagickPathExtent],
*density,
filename[MagickPathExtent],
geometry[MagickPathExtent],
input_filename[MagickPathExtent],
message[MagickPathExtent],
*options,
postscript_filename[MagickPathExtent];
const char
*option;
const DelegateInfo
*delegate_info;
double
angle;
GeometryInfo
geometry_info;
Image
*image,
*next,
*pdf_image;
ImageInfo
*read_info;
int
c,
file;
MagickBooleanType
cmyk,
cropbox,
fitPage,
status,
stop_on_error,
trimbox;
MagickStatusType
flags;
PointInfo
delta;
RectangleInfo
bounding_box,
page;
register char
*p;
register ssize_t
i;
SegmentInfo
bounds,
hires_bounds;
size_t
scene,
spotcolor;
ssize_t
count;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Open image file.
*/
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->resolution.x == 0.0) || (image->resolution.y == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
(void) memset(&page,0,sizeof(page));
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
page.width=(size_t) ceil((double) (page.width*image->resolution.x/delta.x)-
0.5);
page.height=(size_t) ceil((double) (page.height*image->resolution.y/delta.y)-
0.5);
/*
Determine page geometry from the PDF media box.
*/
cmyk=image_info->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
cropbox=IsStringTrue(GetImageOption(image_info,"pdf:use-cropbox"));
stop_on_error=IsStringTrue(GetImageOption(image_info,"pdf:stop-on-error"));
trimbox=IsStringTrue(GetImageOption(image_info,"pdf:use-trimbox"));
count=0;
spotcolor=0;
(void) memset(&bounding_box,0,sizeof(bounding_box));
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(&hires_bounds,0,sizeof(hires_bounds));
(void) memset(command,0,sizeof(command));
angle=0.0;
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note PDF elements.
*/
if (c == '\n')
c=' ';
*p++=(char) c;
if ((c != (int) '/') && (c != (int) '%') &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*(--p)='\0';
p=command;
if (LocaleNCompare(PDFRotate,command,strlen(PDFRotate)) == 0)
count=(ssize_t) sscanf(command,"Rotate %lf",&angle);
/*
Is this a CMYK document?
*/
if (LocaleNCompare(DefaultCMYK,command,strlen(DefaultCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(DeviceCMYK,command,strlen(DeviceCMYK)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CMYKProcessColor,command,strlen(CMYKProcessColor)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(SpotColor,command,strlen(SpotColor)) == 0)
{
char
name[MagickPathExtent],
property[MagickPathExtent],
*value;
/*
Note spot names.
*/
(void) FormatLocaleString(property,MagickPathExtent,
"pdf:SpotColor-%.20g",(double) spotcolor++);
i=0;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
if ((isspace(c) != 0) || (c == '/') || ((i+1) == MagickPathExtent))
break;
name[i++]=(char) c;
}
name[i]='\0';
value=ConstantString(name);
(void) SubstituteString(&value,"#20"," ");
if (*value != '\0')
(void) SetImageProperty(image,property,value,exception);
value=DestroyString(value);
continue;
}
if (LocaleNCompare(PDFVersion,command,strlen(PDFVersion)) == 0)
(void) SetImageProperty(image,"pdf:Version",command,exception);
if (image_info->page != (char *) NULL)
continue;
count=0;
if (cropbox != MagickFalse)
{
if (LocaleNCompare(CropBox,command,strlen(CropBox)) == 0)
{
/*
Note region defined by crop box.
*/
count=(ssize_t) sscanf(command,"CropBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"CropBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
}
else
if (trimbox != MagickFalse)
{
if (LocaleNCompare(TrimBox,command,strlen(TrimBox)) == 0)
{
/*
Note region defined by trim box.
*/
count=(ssize_t) sscanf(command,"TrimBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"TrimBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
}
else
if (LocaleNCompare(MediaBox,command,strlen(MediaBox)) == 0)
{
/*
Note region defined by media box.
*/
count=(ssize_t) sscanf(command,"MediaBox [%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
if (count != 4)
count=(ssize_t) sscanf(command,"MediaBox[%lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
}
if (count != 4)
continue;
if ((fabs(bounds.x2-bounds.x1) <= fabs(hires_bounds.x2-hires_bounds.x1)) ||
(fabs(bounds.y2-bounds.y1) <= fabs(hires_bounds.y2-hires_bounds.y1)))
continue;
hires_bounds=bounds;
}
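  /*
    Keep the largest page box seen: a candidate replaces hires_bounds only
    when it is larger in both dimensions.  If hires_bounds is still
    degenerate below, no usable CropBox/TrimBox/MediaBox was found and the
    default page geometry stands.
  */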
if ((fabs(hires_bounds.x2-hires_bounds.x1) >= MagickEpsilon) &&
(fabs(hires_bounds.y2-hires_bounds.y1) >= MagickEpsilon))
{
/*
Set PDF render geometry.
*/
      (void) FormatLocaleString(geometry,MagickPathExtent,"%gx%g%+.15g%+.15g",
        hires_bounds.x2-hires_bounds.x1,hires_bounds.y2-hires_bounds.y1,
        hires_bounds.x1,hires_bounds.y1);
(void) SetImageProperty(image,"pdf:HiResBoundingBox",geometry,exception);
page.width=(size_t) ceil((double) ((hires_bounds.x2-hires_bounds.x1)*
image->resolution.x/delta.x)-0.5);
page.height=(size_t) ceil((double) ((hires_bounds.y2-hires_bounds.y1)*
image->resolution.y/delta.y)-0.5);
}
fitPage=MagickFalse;
option=GetImageOption(image_info,"pdf:fit-page");
if (option != (char *) NULL)
{
char
*page_geometry;
page_geometry=GetPageGeometry(option);
flags=ParseMetaGeometry(page_geometry,&page.x,&page.y,&page.width,
&page.height);
page_geometry=DestroyString(page_geometry);
if (flags == NoValue)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidGeometry","`%s'",option);
image=DestroyImage(image);
return((Image *) NULL);
}
page.width=(size_t) ceil((double) (page.width*image->resolution.x/delta.x)
-0.5);
page.height=(size_t) ceil((double) (page.height*image->resolution.y/
delta.y) -0.5);
fitPage=MagickTrue;
}
if ((fabs(angle) == 90.0) || (fabs(angle) == 270.0))
{
size_t
swap;
swap=page.width;
page.width=page.height;
page.height=swap;
}
if (IssRGBCompatibleColorspace(image_info->colorspace) != MagickFalse)
cmyk=MagickFalse;
/*
Create Ghostscript control file.
*/
file=AcquireUniqueFileResource(postscript_filename);
if (file == -1)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImage(image);
return((Image *) NULL);
}
count=write(file," ",1);
file=close(file)-1;
/*
Render Postscript with the Ghostscript delegate.
*/
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("ps:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("ps:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("ps:alpha",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
(void) RelinquishUniqueFileResource(postscript_filename);
image=DestroyImage(image);
return((Image *) NULL);
}
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MagickPathExtent,"%gx%g",
image->resolution.x,image->resolution.y);
if ((image_info->page != (char *) NULL) || (fitPage != MagickFalse))
(void) FormatLocaleString(options,MagickPathExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
if (fitPage != MagickFalse)
(void) ConcatenateMagickString(options,"-dPSFitPage ",MagickPathExtent);
if (cmyk != MagickFalse)
(void) ConcatenateMagickString(options,"-dUseCIEColor ",MagickPathExtent);
if (cropbox != MagickFalse)
(void) ConcatenateMagickString(options,"-dUseCropBox ",MagickPathExtent);
if (stop_on_error != MagickFalse)
(void) ConcatenateMagickString(options,"-dPDFSTOPONERROR ",
MagickPathExtent);
if (trimbox != MagickFalse)
(void) ConcatenateMagickString(options,"-dUseTrimBox ",MagickPathExtent);
option=GetImageOption(image_info,"authenticate");
if (option != (char *) NULL)
{
char
passphrase[MagickPathExtent];
(void) FormatLocaleString(passphrase,MagickPathExtent,
"\"-sPDFPassword=%s\" ",option);
(void) ConcatenateMagickString(options,passphrase,MagickPathExtent);
}
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
char
pages[MagickPathExtent];
(void) FormatLocaleString(pages,MagickPathExtent,"-dFirstPage=%.20g "
"-dLastPage=%.20g",(double) read_info->scene+1,(double)
(read_info->scene+read_info->number_scenes));
(void) ConcatenateMagickString(options,pages,MagickPathExtent);
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
(void) CopyMagickString(filename,read_info->filename,MagickPathExtent);
(void) AcquireUniqueFilename(filename);
(void) RelinquishUniqueFileResource(filename);
(void) ConcatenateMagickString(filename,"%d",MagickPathExtent);
(void) FormatLocaleString(command,MagickPathExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,filename,
postscript_filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
*message='\0';
status=InvokePDFDelegate(read_info->verbose,command,message,exception);
(void) RelinquishUniqueFileResource(postscript_filename);
(void) RelinquishUniqueFileResource(input_filename);
pdf_image=(Image *) NULL;
if (status == MagickFalse)
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename,exception);
if (IsPDFRendered(read_info->filename) == MagickFalse)
break;
(void) RelinquishUniqueFileResource(read_info->filename);
}
else
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename,exception);
if (IsPDFRendered(read_info->filename) == MagickFalse)
break;
read_info->blob=NULL;
read_info->length=0;
next=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
if (next == (Image *) NULL)
break;
AppendImageToList(&pdf_image,next);
}
read_info=DestroyImageInfo(read_info);
if (pdf_image == (Image *) NULL)
{
if (*message != '\0')
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError,
"PDFDelegateFailed","`%s'",message);
image=DestroyImage(image);
return((Image *) NULL);
}
if (LocaleCompare(pdf_image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(pdf_image,exception);
if (cmyk_image != (Image *) NULL)
{
pdf_image=DestroyImageList(pdf_image);
pdf_image=cmyk_image;
}
}
  (void) SeekBlob(image,0,SEEK_SET);
  p=command;
  for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note document structuring comments.
*/
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
if (LocaleNCompare(BeginXMPPacket,command,strlen(BeginXMPPacket)) == 0)
{
StringInfo
*profile;
/*
Read XMP profile.
*/
p=command;
profile=StringToStringInfo(command);
for (i=(ssize_t) GetStringInfoLength(profile)-1; c != EOF; i++)
{
SetStringInfoLength(profile,(size_t) (i+1));
c=ReadBlobByte(image);
GetStringInfoDatum(profile)[i]=(unsigned char) c;
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MagickPathExtent-1)))
continue;
*p='\0';
p=command;
if (LocaleNCompare(EndXMPPacket,command,strlen(EndXMPPacket)) == 0)
break;
}
SetStringInfoLength(profile,(size_t) i);
(void) SetImageProfile(image,"xmp",profile,exception);
profile=DestroyStringInfo(profile);
continue;
}
}
(void) CloseBlob(image);
if (image_info->number_scenes != 0)
{
Image
*clone_image;
/*
      Add placeholder images to meet the subimage specification requirement.
*/
for (i=0; i < (ssize_t) image_info->scene; i++)
{
clone_image=CloneImage(pdf_image,1,1,MagickTrue,exception);
if (clone_image != (Image *) NULL)
PrependImageToList(&pdf_image,clone_image);
}
}
do
{
(void) CopyMagickString(pdf_image->filename,filename,MagickPathExtent);
(void) CopyMagickString(pdf_image->magick,image->magick,MagickPathExtent);
pdf_image->page=page;
(void) CloneImageProfiles(pdf_image,image);
(void) CloneImageProperties(pdf_image,image);
next=SyncNextImageInList(pdf_image);
if (next != (Image *) NULL)
pdf_image=next;
} while (next != (Image *) NULL);
image=DestroyImage(image);
scene=0;
for (next=GetFirstImageInList(pdf_image); next != (Image *) NULL; )
{
next->scene=scene++;
next=GetNextImageInList(next);
}
return(GetFirstImageInList(pdf_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P D F I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPDFImage() adds properties for the PDF image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPDFImage method is:
%
% size_t RegisterPDFImage(void)
%
*/
ModuleExport size_t RegisterPDFImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("PDF","AI","Adobe Illustrator CS2");
entry->decoder=(DecodeImageHandler *) ReadPDFImage;
entry->encoder=(EncodeImageHandler *) WritePDFImage;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderBlobSupportFlag;
entry->mime_type=ConstantString("application/pdf");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PDF","EPDF",
"Encapsulated Portable Document Format");
entry->decoder=(DecodeImageHandler *) ReadPDFImage;
entry->encoder=(EncodeImageHandler *) WritePDFImage;
entry->flags^=CoderAdjoinFlag;
entry->flags^=CoderBlobSupportFlag;
entry->mime_type=ConstantString("application/pdf");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PDF","PDF","Portable Document Format");
entry->decoder=(DecodeImageHandler *) ReadPDFImage;
entry->encoder=(EncodeImageHandler *) WritePDFImage;
entry->magick=(IsImageFormatHandler *) IsPDF;
entry->flags^=CoderBlobSupportFlag;
entry->mime_type=ConstantString("application/pdf");
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("PDF","PDFA","Portable Document Archive Format");
entry->decoder=(DecodeImageHandler *) ReadPDFImage;
entry->encoder=(EncodeImageHandler *) WritePDFImage;
entry->magick=(IsImageFormatHandler *) IsPDF;
entry->flags^=CoderBlobSupportFlag;
entry->mime_type=ConstantString("application/pdf");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P D F I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPDFImage() removes format registrations made by the
% PDF module from the list of supported formats.
%
% The format of the UnregisterPDFImage method is:
%
% UnregisterPDFImage(void)
%
*/
ModuleExport void UnregisterPDFImage(void)
{
(void) UnregisterMagickInfo("AI");
(void) UnregisterMagickInfo("EPDF");
(void) UnregisterMagickInfo("PDF");
(void) UnregisterMagickInfo("PDFA");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P D F I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePDFImage() writes an image in the Portable Document image
% format.
%
% The format of the WritePDFImage method is:
%
% MagickBooleanType WritePDFImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
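/*
  EscapeParenthesis() backslash-escapes '(', ')' and '\' so a string can be
  embedded in a PDF literal string object, which is delimited by
  parentheses; for example (hypothetical input), "a(b)" becomes "a\(b\)".
  The caller owns the result and must free it with DestroyString().
*/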
static char *EscapeParenthesis(const char *source)
{
char
*destination;
register char
*q;
register const char
*p;
size_t
length;
assert(source != (const char *) NULL);
length=0;
for (p=source; *p != '\0'; p++)
{
if ((*p == '\\') || (*p == '(') || (*p == ')'))
{
if (~length < 1)
ThrowFatalException(ResourceLimitFatalError,"UnableToEscapeString");
length++;
}
length++;
}
destination=(char *) NULL;
if (~length >= (MagickPathExtent-1))
destination=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*destination));
if (destination == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"UnableToEscapeString");
*destination='\0';
q=destination;
for (p=source; *p != '\0'; p++)
{
if ((*p == '\\') || (*p == '(') || (*p == ')'))
*q++='\\';
*q++=(*p);
}
*q='\0';
return(destination);
}
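/*
  UTF8ToUTF16() converts a UTF-8 byte string to UTF-16 code units; only 1-
  to 3-byte sequences (the Basic Multilingual Plane) are handled, and
  malformed or 4-byte input yields 0 so the caller can fall back to a plain
  byte copy, as ConvertUTF8ToUTF16() below does.  When utf16 is NULL the
  routine validates the input and returns the UTF-8 byte count, an upper
  bound on the UTF-16 length that is used for allocation.
*/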
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
register const unsigned char
*p;
if (utf16 != (wchar_t *) NULL)
{
register wchar_t
*q;
wchar_t
c;
/*
Convert UTF-8 to UTF-16.
*/
q=utf16;
for (p=utf8; *p != '\0'; p++)
{
if ((*p & 0x80) == 0)
*q=(*p);
else
if ((*p & 0xE0) == 0xC0)
{
c=(*p);
*q=(c & 0x1F) << 6;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
*q|=(*p & 0x3F);
}
else
if ((*p & 0xF0) == 0xE0)
{
c=(*p);
*q=c << 12;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
c=(*p);
*q|=(c & 0x3F) << 6;
p++;
if ((*p & 0xC0) != 0x80)
return(0);
*q|=(*p & 0x3F);
}
else
return(0);
q++;
}
*q++=(wchar_t) '\0';
return((size_t) (q-utf16));
}
/*
Compute UTF-16 string length.
*/
for (p=utf8; *p != '\0'; p++)
{
if ((*p & 0x80) == 0)
;
else
if ((*p & 0xE0) == 0xC0)
{
p++;
if ((*p & 0xC0) != 0x80)
return(0);
}
else
if ((*p & 0xF0) == 0xE0)
{
p++;
if ((*p & 0xC0) != 0x80)
return(0);
p++;
if ((*p & 0xC0) != 0x80)
return(0);
}
else
return(0);
}
return((size_t) (p-utf8));
}
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source,size_t *length)
{
wchar_t
*utf16;
*length=UTF8ToUTF16(source,(wchar_t *) NULL);
if (*length == 0)
{
register ssize_t
i;
/*
Not UTF-8, just copy.
*/
*length=strlen((const char *) source);
utf16=(wchar_t *) AcquireQuantumMemory(*length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
for (i=0; i <= (ssize_t) *length; i++)
utf16[i]=source[i];
return(utf16);
}
utf16=(wchar_t *) AcquireQuantumMemory(*length+1,sizeof(*utf16));
if (utf16 == (wchar_t *) NULL)
return((wchar_t *) NULL);
*length=UTF8ToUTF16(source,utf16);
return(utf16);
}
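/*
  Huffman2DEncodeImage() re-encodes inject_image through the GROUP4 coder
  (CCITT Group 4 two-dimensional Huffman coding) and writes the resulting
  blob into the PDF stream of image.
*/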
static MagickBooleanType Huffman2DEncodeImage(const ImageInfo *image_info,
Image *image,Image *inject_image,ExceptionInfo *exception)
{
Image
*group4_image;
ImageInfo
*write_info;
MagickBooleanType
status;
size_t
length;
unsigned char
*group4;
status=MagickTrue;
write_info=CloneImageInfo(image_info);
(void) CopyMagickString(write_info->filename,"GROUP4:",MagickPathExtent);
(void) CopyMagickString(write_info->magick,"GROUP4",MagickPathExtent);
  group4_image=CloneImage(inject_image,0,0,MagickTrue,exception);
  if (group4_image == (Image *) NULL)
    {
      write_info=DestroyImageInfo(write_info);
      return(MagickFalse);
    }
  group4=(unsigned char *) ImageToBlob(write_info,group4_image,&length,
    exception);
  group4_image=DestroyImage(group4_image);
  write_info=DestroyImageInfo(write_info);
  if (group4 == (unsigned char *) NULL)
    return(MagickFalse);
if (WriteBlob(image,length,group4) != (ssize_t) length)
status=MagickFalse;
group4=(unsigned char *) RelinquishMagickMemory(group4);
return(status);
}
static MagickBooleanType WritePDFImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
#define CFormat "/Filter [ /%s ]\n"
#define ObjectsPerImage 14
#define ThrowPDFException(exception,message) \
{ \
if (xref != (MagickOffsetType *) NULL) \
xref=(MagickOffsetType *) RelinquishMagickMemory(xref); \
ThrowWriterException((exception),(message)); \
}
DisableMSCWarning(4310)
static const char
XMPProfile[]=
{
"<?xpacket begin=\"%s\" id=\"W5M0MpCehiHzreSzNTczkc9d\"?>\n"
"<x:xmpmeta xmlns:x=\"adobe:ns:meta/\" x:xmptk=\"Adobe XMP Core 4.0-c316 44.253921, Sun Oct 01 2006 17:08:23\">\n"
" <rdf:RDF xmlns:rdf=\"http://www.w3.org/1999/02/22-rdf-syntax-ns#\">\n"
" <rdf:Description rdf:about=\"\"\n"
" xmlns:xap=\"http://ns.adobe.com/xap/1.0/\">\n"
" <xap:ModifyDate>%s</xap:ModifyDate>\n"
" <xap:CreateDate>%s</xap:CreateDate>\n"
" <xap:MetadataDate>%s</xap:MetadataDate>\n"
" <xap:CreatorTool>%s</xap:CreatorTool>\n"
" </rdf:Description>\n"
" <rdf:Description rdf:about=\"\"\n"
" xmlns:dc=\"http://purl.org/dc/elements/1.1/\">\n"
" <dc:format>application/pdf</dc:format>\n"
" <dc:title>\n"
" <rdf:Alt>\n"
" <rdf:li xml:lang=\"x-default\">%s</rdf:li>\n"
" </rdf:Alt>\n"
" </dc:title>\n"
" </rdf:Description>\n"
" <rdf:Description rdf:about=\"\"\n"
" xmlns:xapMM=\"http://ns.adobe.com/xap/1.0/mm/\">\n"
" <xapMM:DocumentID>uuid:6ec119d7-7982-4f56-808d-dfe64f5b35cf</xapMM:DocumentID>\n"
" <xapMM:InstanceID>uuid:a79b99b4-6235-447f-9f6c-ec18ef7555cb</xapMM:InstanceID>\n"
" </rdf:Description>\n"
" <rdf:Description rdf:about=\"\"\n"
" xmlns:pdf=\"http://ns.adobe.com/pdf/1.3/\">\n"
" <pdf:Producer>%s</pdf:Producer>\n"
" </rdf:Description>\n"
" <rdf:Description rdf:about=\"\"\n"
" xmlns:pdfaid=\"http://www.aiim.org/pdfa/ns/id/\">\n"
" <pdfaid:part>3</pdfaid:part>\n"
" <pdfaid:conformance>B</pdfaid:conformance>\n"
" </rdf:Description>\n"
" </rdf:RDF>\n"
"</x:xmpmeta>\n"
"<?xpacket end=\"w\"?>\n"
},
XMPProfileMagick[4]= { (char) 0xef, (char) 0xbb, (char) 0xbf, (char) 0x00 };
RestoreMSCWarning
char
basename[MagickPathExtent],
buffer[MagickPathExtent],
*escape,
date[MagickPathExtent],
**labels,
page_geometry[MagickPathExtent],
*url;
CompressionType
compression;
const char
*device,
*option,
*value;
const StringInfo
*profile;
double
pointsize;
GeometryInfo
geometry_info;
Image
*next,
*tile_image;
MagickBooleanType
status;
MagickOffsetType
offset,
scene,
*xref;
MagickSizeType
number_pixels;
MagickStatusType
flags;
PointInfo
delta,
resolution,
scale;
RectangleInfo
geometry,
media_info,
page_info;
register const Quantum
*p;
register unsigned char
*q;
register ssize_t
i,
x;
size_t
channels,
imageListLength,
info_id,
length,
object,
pages_id,
root_id,
text_size,
version;
ssize_t
count,
page_count,
y;
struct tm
local_time;
time_t
seconds;
unsigned char
*pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
/*
Allocate X ref memory.
*/
xref=(MagickOffsetType *) AcquireQuantumMemory(2048UL,sizeof(*xref));
if (xref == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
(void) memset(xref,0,2048UL*sizeof(*xref));
/*
Write Info object.
*/
object=0;
version=3;
if (image_info->compression == JPEG2000Compression)
version=(size_t) MagickMax(version,5);
for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
if (next->alpha_trait != UndefinedPixelTrait)
version=(size_t) MagickMax(version,4);
if (LocaleCompare(image_info->magick,"PDFA") == 0)
version=(size_t) MagickMax(version,6);
profile=GetImageProfile(image,"icc");
if (profile != (StringInfo *) NULL)
version=(size_t) MagickMax(version,7);
(void) FormatLocaleString(buffer,MagickPathExtent,"%%PDF-1.%.20g \n",(double)
version);
(void) WriteBlobString(image,buffer);
if (LocaleCompare(image_info->magick,"PDFA") == 0)
{
(void) WriteBlobByte(image,'%');
(void) WriteBlobByte(image,0xe2);
(void) WriteBlobByte(image,0xe3);
(void) WriteBlobByte(image,0xcf);
(void) WriteBlobByte(image,0xd3);
(void) WriteBlobByte(image,'\n');
}
/*
Write Catalog object.
*/
xref[object++]=TellBlob(image);
root_id=object;
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
if (LocaleCompare(image_info->magick,"PDFA") != 0)
(void) FormatLocaleString(buffer,MagickPathExtent,"/Pages %.20g 0 R\n",
(double) object+1);
else
{
(void) FormatLocaleString(buffer,MagickPathExtent,"/Metadata %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Pages %.20g 0 R\n",
(double) object+2);
}
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"/Type /Catalog");
option=GetImageOption(image_info,"pdf:page-direction");
if ((option != (const char *) NULL) &&
(LocaleCompare(option,"right-to-left") == 0))
(void) WriteBlobString(image,"/ViewerPreferences<</PageDirection/R2L>>\n");
(void) WriteBlobString(image,"\n");
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"endobj\n");
GetPathComponent(image->filename,BasePath,basename);
if (LocaleCompare(image_info->magick,"PDFA") == 0)
{
      char
        create_date[MagickPathExtent],
        modify_date[MagickPathExtent],
        timestamp[MagickPathExtent],
        xmp_profile[MagickPathExtent];
/*
Write XMP object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
(void) WriteBlobString(image,"/Subtype /XML\n");
*modify_date='\0';
value=GetImageProperty(image,"date:modify",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(modify_date,value,MagickPathExtent);
*create_date='\0';
value=GetImageProperty(image,"date:create",exception);
if (value != (const char *) NULL)
(void) CopyMagickString(create_date,value,MagickPathExtent);
(void) FormatMagickTime(time((time_t *) NULL),MagickPathExtent,timestamp);
url=(char *) MagickAuthoritativeURL;
escape=EscapeParenthesis(basename);
i=FormatLocaleString(xmp_profile,MagickPathExtent,XMPProfile,
XMPProfileMagick,modify_date,create_date,timestamp,url,escape,url);
escape=DestroyString(escape);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g\n",
(double) i);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"/Type /Metadata\n");
(void) WriteBlobString(image,">>\nstream\n");
(void) WriteBlobString(image,xmp_profile);
(void) WriteBlobString(image,"\nendstream\n");
(void) WriteBlobString(image,"endobj\n");
}
/*
Write Pages object.
*/
xref[object++]=TellBlob(image);
pages_id=object;
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
(void) WriteBlobString(image,"/Type /Pages\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Kids [ %.20g 0 R ",
(double) object+1);
(void) WriteBlobString(image,buffer);
count=(ssize_t) (pages_id+ObjectsPerImage+1);
page_count=1;
if (image_info->adjoin != MagickFalse)
{
Image
*kid_image;
/*
Predict page object id's.
*/
kid_image=image;
for ( ; GetNextImageInList(kid_image) != (Image *) NULL; count+=ObjectsPerImage)
{
page_count++;
profile=GetImageProfile(kid_image,"icc");
if (profile != (StringInfo *) NULL)
count+=2;
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 R ",(double)
count);
(void) WriteBlobString(image,buffer);
kid_image=GetNextImageInList(kid_image);
}
xref=(MagickOffsetType *) ResizeQuantumMemory(xref,(size_t) count+2048UL,
sizeof(*xref));
if (xref == (MagickOffsetType *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) WriteBlobString(image,"]\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Count %.20g\n",(double)
page_count);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"endobj\n");
scene=0;
imageListLength=GetImageListLength(image);
do
{
MagickBooleanType
has_icc_profile;
profile=GetImageProfile(image,"icc");
has_icc_profile=(profile != (StringInfo *) NULL) ? MagickTrue : MagickFalse;
compression=image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
switch (compression)
{
case FaxCompression:
case Group4Compression:
{
if ((SetImageMonochrome(image,exception) == MagickFalse) ||
(image->alpha_trait != UndefinedPixelTrait))
compression=RLECompression;
break;
}
#if !defined(MAGICKCORE_JPEG_DELEGATE)
case JPEGCompression:
{
compression=RLECompression;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn","`%s' (JPEG)",
image->filename);
break;
}
#endif
#if !defined(MAGICKCORE_LIBOPENJP2_DELEGATE)
case JPEG2000Compression:
{
compression=RLECompression;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn","`%s' (JP2)",
image->filename);
break;
}
#endif
#if !defined(MAGICKCORE_ZLIB_DELEGATE)
case ZipCompression:
{
compression=RLECompression;
(void) ThrowMagickException(exception,GetMagickModule(),
MissingDelegateError,"DelegateLibrarySupportNotBuiltIn","`%s' (ZLIB)",
image->filename);
break;
}
#endif
case LZWCompression:
{
if (LocaleCompare(image_info->magick,"PDFA") == 0)
compression=RLECompression; /* LZW compression is forbidden */
break;
}
case NoCompression:
{
if (LocaleCompare(image_info->magick,"PDFA") == 0)
compression=RLECompression; /* ASCII 85 compression is forbidden */
break;
}
default:
break;
}
if (compression == JPEG2000Compression)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
/*
Scale relative to dots-per-inch.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
resolution.x=image->resolution.x;
resolution.y=image->resolution.y;
if ((resolution.x == 0.0) || (resolution.y == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image->units == PixelsPerCentimeterResolution)
{
resolution.x=(double) ((size_t) (100.0*2.54*resolution.x+0.5)/100.0);
resolution.y=(double) ((size_t) (100.0*2.54*resolution.y+0.5)/100.0);
}
SetGeometry(image,&geometry);
(void) FormatLocaleString(page_geometry,MagickPathExtent,"%.20gx%.20g",
(double) image->columns,(double) image->rows);
if (image_info->page != (char *) NULL)
(void) CopyMagickString(page_geometry,image_info->page,MagickPathExtent);
else
if ((image->page.width != 0) && (image->page.height != 0))
(void) FormatLocaleString(page_geometry,MagickPathExtent,
"%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double)
image->page.height,(double) image->page.x,(double) image->page.y);
else
if ((image->gravity != UndefinedGravity) &&
(LocaleCompare(image_info->magick,"PDF") == 0))
(void) CopyMagickString(page_geometry,PSPageGeometry,
MagickPathExtent);
(void) ConcatenateMagickString(page_geometry,">",MagickPathExtent);
(void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
scale.x=(double) (geometry.width*delta.x)/resolution.x;
geometry.width=(size_t) floor(scale.x+0.5);
scale.y=(double) (geometry.height*delta.y)/resolution.y;
geometry.height=(size_t) floor(scale.y+0.5);
(void) ParseAbsoluteGeometry(page_geometry,&media_info);
(void) ParseGravityGeometry(image,page_geometry,&page_info,exception);
if (image->gravity != UndefinedGravity)
{
geometry.x=(-page_info.x);
geometry.y=(ssize_t) (media_info.height+page_info.y-image->rows);
}
pointsize=12.0;
if (image_info->pointsize != 0.0)
pointsize=image_info->pointsize;
text_size=0;
value=GetImageProperty(image,"label",exception);
if (value != (const char *) NULL)
text_size=(size_t) (MultilineCensus(value)*pointsize+12);
(void) text_size;
/*
Write Page object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
(void) WriteBlobString(image,"/Type /Page\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Parent %.20g 0 R\n",
(double) pages_id);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"/Resources <<\n");
labels=(char **) NULL;
value=GetImageProperty(image,"label",exception);
if (value != (const char *) NULL)
labels=StringToList(value);
if (labels != (char **) NULL)
{
(void) FormatLocaleString(buffer,MagickPathExtent,
"/Font << /F%.20g %.20g 0 R >>\n",(double) image->scene,(double)
object+4);
(void) WriteBlobString(image,buffer);
}
(void) FormatLocaleString(buffer,MagickPathExtent,
"/XObject << /Im%.20g %.20g 0 R >>\n",(double) image->scene,(double)
object+5);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/ProcSet %.20g 0 R >>\n",
(double) object+3);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,
"/MediaBox [0 0 %g %g]\n",72.0*media_info.width/resolution.x,
72.0*media_info.height/resolution.y);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,
"/CropBox [0 0 %g %g]\n",72.0*media_info.width/resolution.x,
72.0*media_info.height/resolution.y);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Contents %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Thumb %.20g 0 R\n",
(double) object+(has_icc_profile != MagickFalse ? 10 : 8));
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Contents object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"stream\n");
offset=TellBlob(image);
(void) WriteBlobString(image,"q\n");
if (labels != (char **) NULL)
for (i=0; labels[i] != (char *) NULL; i++)
{
(void) WriteBlobString(image,"BT\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/F%.20g %g Tf\n",
(double) image->scene,pointsize);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g %.20g Td\n",
(double) geometry.x,(double) (geometry.y+geometry.height+i*pointsize+
12));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"(%s) Tj\n",
labels[i]);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"ET\n");
labels[i]=DestroyString(labels[i]);
}
(void) FormatLocaleString(buffer,MagickPathExtent,
"%g 0 0 %g %.20g %.20g cm\n",scale.x,scale.y,(double) geometry.x,
(double) geometry.y);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Im%.20g Do\n",(double)
image->scene);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"Q\n");
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"\nendstream\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
/*
Write Procset object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) CopyMagickString(buffer,"[ /PDF /Text /ImageC",MagickPathExtent);
else
if ((compression == FaxCompression) || (compression == Group4Compression))
(void) CopyMagickString(buffer,"[ /PDF /Text /ImageB",MagickPathExtent);
else
(void) CopyMagickString(buffer,"[ /PDF /Text /ImageI",MagickPathExtent);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image," ]\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Font object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
if (labels != (char **) NULL)
{
(void) WriteBlobString(image,"/Type /Font\n");
(void) WriteBlobString(image,"/Subtype /Type1\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Name /F%.20g\n",
(double) image->scene);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"/BaseFont /Helvetica\n");
(void) WriteBlobString(image,"/Encoding /MacRomanEncoding\n");
labels=(char **) RelinquishMagickMemory(labels);
}
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write XObject object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
(void) WriteBlobString(image,"/Type /XObject\n");
(void) WriteBlobString(image,"/Subtype /Image\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Name /Im%.20g\n",
(double) image->scene);
(void) WriteBlobString(image,buffer);
switch (compression)
{
case NoCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"ASCII85Decode");
break;
}
case JPEGCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"DCTDecode");
if (image->colorspace != CMYKColorspace)
break;
(void) WriteBlobString(image,buffer);
(void) CopyMagickString(buffer,"/Decode [1 0 1 0 1 0 1 0]\n",
MagickPathExtent);
break;
}
case JPEG2000Compression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"JPXDecode");
if (image->colorspace != CMYKColorspace)
break;
(void) WriteBlobString(image,buffer);
(void) CopyMagickString(buffer,"/Decode [1 0 1 0 1 0 1 0]\n",
MagickPathExtent);
break;
}
case LZWCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"LZWDecode");
break;
}
case ZipCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"FlateDecode");
break;
}
case FaxCompression:
case Group4Compression:
{
(void) CopyMagickString(buffer,"/Filter [ /CCITTFaxDecode ]\n",
MagickPathExtent);
(void) WriteBlobString(image,buffer);
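/*
Per the PDF CCITTFaxDecode filter, /K selects the scheme: K < 0 is pure
two-dimensional (Group 4), K = 0 is pure one-dimensional (Group 3 1-D),
and K > 0 is mixed (Group 3 2-D); CCITTParam supplies this value.
*/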
(void) FormatLocaleString(buffer,MagickPathExtent,"/DecodeParms [ << "
"/K %s /BlackIs1 false /Columns %.20g /Rows %.20g >> ]\n",CCITTParam,
(double) image->columns,(double) image->rows);
break;
}
default:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"RunLengthDecode");
break;
}
}
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Width %.20g\n",(double)
image->columns);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Height %.20g\n",(double)
image->rows);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/ColorSpace %.20g 0 R\n",
(double) object+2);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/BitsPerComponent %d\n",
(compression == FaxCompression) || (compression == Group4Compression) ?
1 : 8);
(void) WriteBlobString(image,buffer);
if (image->alpha_trait != UndefinedPixelTrait)
{
(void) FormatLocaleString(buffer,MagickPathExtent,"/SMask %.20g 0 R\n",
(double) object+(has_icc_profile != MagickFalse ? 9 : 7));
(void) WriteBlobString(image,buffer);
}
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"stream\n");
offset=TellBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
if ((4*number_pixels) != (MagickSizeType) ((size_t) (4*number_pixels)))
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
if ((compression == FaxCompression) || (compression == Group4Compression) ||
((image_info->type != TrueColorType) &&
(SetImageGray(image,exception) != MagickFalse)))
{
switch (compression)
{
case FaxCompression:
case Group4Compression:
{
if (LocaleCompare(CCITTParam,"0") == 0)
{
(void) HuffmanEncodeImage(image_info,image,image,exception);
break;
}
(void) Huffman2DEncodeImage(image_info,image,image,exception);
break;
}
case JPEGCompression:
{
status=InjectImageBlob(image_info,image,image,"jpeg",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case JPEG2000Compression:
{
status=InjectImageBlob(image_info,image,image,"jp2",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(GetPixelLuma(image,p)));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed PseudoColor packets.
*/
Ascii85Initialize(image);
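/*
ASCII85 maps each 4-byte group to 5 ASCII characters (with 'z' as a
shorthand for an all-zero group), so the stream grows by roughly 25%
relative to the raw samples.
*/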
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
Ascii85Encode(image,ScaleQuantumToChar(ClampToQuantum(
GetPixelLuma(image,p))));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
Ascii85Flush(image);
break;
}
}
}
else
if ((image->storage_class == DirectClass) || (image->colors > 256) ||
(compression == JPEGCompression) ||
(compression == JPEG2000Compression))
switch (compression)
{
case JPEGCompression:
{
status=InjectImageBlob(image_info,image,image,"jpeg",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case JPEG2000Compression:
{
status=InjectImageBlob(image_info,image,image,"jp2",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
length*=image->colorspace == CMYKColorspace ? 4UL : 3UL;
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
xref=(MagickOffsetType *) RelinquishMagickMemory(xref);
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
if (image->colorspace == CMYKColorspace)
*q++=ScaleQuantumToChar(GetPixelBlack(image,p));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed DirectColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
Ascii85Encode(image,ScaleQuantumToChar(GetPixelRed(image,p)));
Ascii85Encode(image,ScaleQuantumToChar(GetPixelGreen(image,p)));
Ascii85Encode(image,ScaleQuantumToChar(GetPixelBlue(image,p)));
if (image->colorspace == CMYKColorspace)
Ascii85Encode(image,ScaleQuantumToChar(
GetPixelBlack(image,p)));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
Ascii85Flush(image);
break;
}
}
else
{
/*
Dump number of colors and colormap.
*/
switch (compression)
{
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
xref=(MagickOffsetType *) RelinquishMagickMemory(xref);
ThrowPDFException(ResourceLimitError,
"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed PseudoColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
Ascii85Encode(image,(unsigned char) GetPixelIndex(image,p));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
Ascii85Flush(image);
break;
}
}
}
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"\nendstream\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
/*
Write Colorspace object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
device="DeviceRGB";
channels=0;
if (image->colorspace == CMYKColorspace)
{
device="DeviceCMYK";
channels=4;
}
else
if ((compression == FaxCompression) ||
(compression == Group4Compression) ||
((image_info->type != TrueColorType) &&
(SetImageGray(image,exception) != MagickFalse)))
{
device="DeviceGray";
channels=1;
}
else
if ((image->storage_class == DirectClass) ||
(image->colors > 256) || (compression == JPEGCompression) ||
(compression == JPEG2000Compression))
{
device="DeviceRGB";
channels=3;
}
profile=GetImageProfile(image,"icc");
if ((profile == (StringInfo *) NULL) || (channels == 0))
{
if (channels != 0)
(void) FormatLocaleString(buffer,MagickPathExtent,"/%s\n",device);
else
(void) FormatLocaleString(buffer,MagickPathExtent,
"[ /Indexed /%s %.20g %.20g 0 R ]\n",device,(double) image->colors-
1,(double) object+3);
(void) WriteBlobString(image,buffer);
}
else
{
const unsigned char
*p;
/*
Write ICC profile.
*/
(void) FormatLocaleString(buffer,MagickPathExtent,
"[/ICCBased %.20g 0 R]\n",(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",
(double) object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"<<\n/N %.20g\n"
"/Filter /ASCII85Decode\n/Length %.20g 0 R\n/Alternate /%s\n>>\n"
"stream\n",(double) channels,(double) object+1,device);
(void) WriteBlobString(image,buffer);
offset=TellBlob(image);
Ascii85Initialize(image);
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
Ascii85Encode(image,(unsigned char) *p++);
Ascii85Flush(image);
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"endstream\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",
(double) object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"endobj\n");
/*
Write Thumb object.
*/
SetGeometry(image,&geometry);
(void) ParseMetaGeometry("106x106+0+0>",&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
tile_image=ThumbnailImage(image,geometry.width,geometry.height,exception);
if (tile_image == (Image *) NULL)
return(MagickFalse);
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
switch (compression)
{
case NoCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"ASCII85Decode");
break;
}
case JPEGCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"DCTDecode");
if (image->colorspace != CMYKColorspace)
break;
(void) WriteBlobString(image,buffer);
(void) CopyMagickString(buffer,"/Decode [1 0 1 0 1 0 1 0]\n",
MagickPathExtent);
break;
}
case JPEG2000Compression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"JPXDecode");
if (image->colorspace != CMYKColorspace)
break;
(void) WriteBlobString(image,buffer);
(void) CopyMagickString(buffer,"/Decode [1 0 1 0 1 0 1 0]\n",
MagickPathExtent);
break;
}
case LZWCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,"LZWDecode");
break;
}
case ZipCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"FlateDecode");
break;
}
case FaxCompression:
case Group4Compression:
{
(void) CopyMagickString(buffer,"/Filter [ /CCITTFaxDecode ]\n",
MagickPathExtent);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/DecodeParms [ << "
"/K %s /BlackIs1 false /Columns %.20g /Rows %.20g >> ]\n",CCITTParam,
(double) tile_image->columns,(double) tile_image->rows);
break;
}
default:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"RunLengthDecode");
break;
}
}
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Width %.20g\n",(double)
tile_image->columns);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Height %.20g\n",(double)
tile_image->rows);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/ColorSpace %.20g 0 R\n",
(double) object-(has_icc_profile != MagickFalse ? 3 : 1));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/BitsPerComponent %d\n",
(compression == FaxCompression) || (compression == Group4Compression) ?
1 : 8);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"stream\n");
offset=TellBlob(image);
number_pixels=(MagickSizeType) tile_image->columns*tile_image->rows;
if ((compression == FaxCompression) ||
(compression == Group4Compression) ||
((image_info->type != TrueColorType) &&
(SetImageGray(tile_image,exception) != MagickFalse)))
{
switch (compression)
{
case FaxCompression:
case Group4Compression:
{
if (LocaleCompare(CCITTParam,"0") == 0)
{
(void) HuffmanEncodeImage(image_info,image,tile_image,
exception);
break;
}
(void) Huffman2DEncodeImage(image_info,image,tile_image,exception);
break;
}
case JPEGCompression:
{
status=InjectImageBlob(image_info,image,tile_image,"jpeg",
exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case JPEG2000Compression:
{
status=InjectImageBlob(image_info,image,tile_image,"jp2",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
tile_image=DestroyImage(tile_image);
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(GetPixelLuma(
tile_image,p)));
p+=GetPixelChannels(tile_image);
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed PseudoColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
Ascii85Encode(image,ScaleQuantumToChar(ClampToQuantum(
GetPixelLuma(tile_image,p))));
p+=GetPixelChannels(tile_image);
}
}
Ascii85Flush(image);
break;
}
}
}
else
if ((tile_image->storage_class == DirectClass) ||
(tile_image->colors > 256) || (compression == JPEGCompression) ||
(compression == JPEG2000Compression))
switch (compression)
{
case JPEGCompression:
{
status=InjectImageBlob(image_info,image,tile_image,"jpeg",
exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case JPEG2000Compression:
{
status=InjectImageBlob(image_info,image,tile_image,"jp2",exception);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
length*=tile_image->colorspace == CMYKColorspace ? 4UL : 3UL;
pixel_info=AcquireVirtualMemory(length,4*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
tile_image=DestroyImage(tile_image);
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelRed(tile_image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(tile_image,p));
*q++=ScaleQuantumToChar(GetPixelBlue(tile_image,p));
if (tile_image->colorspace == CMYKColorspace)
*q++=ScaleQuantumToChar(GetPixelBlack(tile_image,p));
p+=GetPixelChannels(tile_image);
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed DirectColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
Ascii85Encode(image,ScaleQuantumToChar(
GetPixelRed(tile_image,p)));
Ascii85Encode(image,ScaleQuantumToChar(
GetPixelGreen(tile_image,p)));
Ascii85Encode(image,ScaleQuantumToChar(
GetPixelBlue(tile_image,p)));
if (image->colorspace == CMYKColorspace)
Ascii85Encode(image,ScaleQuantumToChar(
GetPixelBlack(tile_image,p)));
p+=GetPixelChannels(tile_image);
}
}
Ascii85Flush(image);
break;
}
}
else
{
/*
Dump number of colors and colormap.
*/
switch (compression)
{
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
pixel_info=AcquireVirtualMemory(length,sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
tile_image=DestroyImage(tile_image);
ThrowPDFException(ResourceLimitError,
"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
*q++=(unsigned char) GetPixelIndex(tile_image,p);
p+=GetPixelChannels(tile_image);
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed PseudoColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) tile_image->rows; y++)
{
p=GetVirtualPixels(tile_image,0,y,tile_image->columns,1,
exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) tile_image->columns; x++)
{
Ascii85Encode(image,(unsigned char)
GetPixelIndex(tile_image,p));
p+=GetPixelChannels(image);
}
}
Ascii85Flush(image);
break;
}
}
}
tile_image=DestroyImage(tile_image);
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"\nendstream\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
if ((image->storage_class == DirectClass) || (image->colors > 256) ||
(compression == FaxCompression) || (compression == Group4Compression))
(void) WriteBlobString(image,">>\n");
else
{
/*
Write Colormap object.
*/
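/*
This lookup stream is the one referenced by the /Indexed colorspace
written earlier: one 3-byte RGB triple per palette entry, ASCII85-encoded
only when the image data itself is uncompressed.
*/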
if (compression == NoCompression)
(void) WriteBlobString(image,"/Filter [ /ASCII85Decode ]\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"stream\n");
offset=TellBlob(image);
if (compression == NoCompression)
Ascii85Initialize(image);
for (i=0; i < (ssize_t) image->colors; i++)
{
if (compression == NoCompression)
{
Ascii85Encode(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].red)));
Ascii85Encode(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].green)));
Ascii85Encode(image,ScaleQuantumToChar(ClampToQuantum(
image->colormap[i].blue)));
continue;
}
(void) WriteBlobByte(image,ScaleQuantumToChar(
ClampToQuantum(image->colormap[i].red)));
(void) WriteBlobByte(image,ScaleQuantumToChar(
ClampToQuantum(image->colormap[i].green)));
(void) WriteBlobByte(image,ScaleQuantumToChar(
ClampToQuantum(image->colormap[i].blue)));
}
if (compression == NoCompression)
Ascii85Flush(image);
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"\nendstream\n");
}
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
/*
Write softmask object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
if (image->alpha_trait == UndefinedPixelTrait)
(void) WriteBlobString(image,">>\n");
else
{
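/*
The soft mask is a separate grayscale image XObject carrying the alpha
channel; the /SMask entry written with the main image XObject earlier
refers to it for per-pixel transparency.
*/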
(void) WriteBlobString(image,"/Type /XObject\n");
(void) WriteBlobString(image,"/Subtype /Image\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Name /Ma%.20g\n",
(double) image->scene);
(void) WriteBlobString(image,buffer);
switch (compression)
{
case NoCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"ASCII85Decode");
break;
}
case LZWCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"LZWDecode");
break;
}
case ZipCompression:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"FlateDecode");
break;
}
default:
{
(void) FormatLocaleString(buffer,MagickPathExtent,CFormat,
"RunLengthDecode");
break;
}
}
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Width %.20g\n",
(double) image->columns);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Height %.20g\n",
(double) image->rows);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"/ColorSpace /DeviceGray\n");
(void) FormatLocaleString(buffer,MagickPathExtent,
"/BitsPerComponent %d\n",(compression == FaxCompression) ||
(compression == Group4Compression) ? 1 : 8);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Length %.20g 0 R\n",
(double) object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"stream\n");
offset=TellBlob(image);
number_pixels=(MagickSizeType) image->columns*image->rows;
switch (compression)
{
case RLECompression:
default:
{
MemoryInfo
*pixel_info;
/*
Allocate pixel array.
*/
length=(size_t) number_pixels;
pixel_info=AcquireVirtualMemory(length,4*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
{
image=DestroyImage(image);
ThrowPDFException(ResourceLimitError,"MemoryAllocationFailed");
}
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
/*
Dump runlength encoded pixels.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
}
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (compression == ZipCompression)
status=ZLIBEncodeImage(image,length,pixels,exception);
else
#endif
if (compression == LZWCompression)
status=LZWEncodeImage(image,length,pixels,exception);
else
status=PackbitsEncodeImage(image,length,pixels,exception);
pixel_info=RelinquishVirtualMemory(pixel_info);
if (status == MagickFalse)
{
(void) CloseBlob(image);
return(MagickFalse);
}
break;
}
case NoCompression:
{
/*
Dump uncompressed PseudoColor packets.
*/
Ascii85Initialize(image);
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
Ascii85Encode(image,ScaleQuantumToChar(GetPixelAlpha(image,p)));
p+=GetPixelChannels(image);
}
}
Ascii85Flush(image);
break;
}
}
offset=TellBlob(image)-offset;
(void) WriteBlobString(image,"\nendstream\n");
}
(void) WriteBlobString(image,"endobj\n");
/*
Write Length object.
*/
xref[object++]=TellBlob(image);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double)
offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"endobj\n");
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
/*
Write Info (document information) object.
*/
xref[object++]=TellBlob(image);
info_id=object;
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g 0 obj\n",(double)
object);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"<<\n");
if (LocaleCompare(image_info->magick,"PDFA") == 0)
(void) FormatLocaleString(buffer,MagickPathExtent,"/Title (%s)\n",
EscapeParenthesis(basename));
else
{
wchar_t
*utf16;
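/*
A PDF text string that begins with the byte order mark 0xFE 0xFF is
interpreted as big-endian UTF-16, hence the "\xfe\xff" prefix and the
MSB-first shorts written below.
*/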
utf16=ConvertUTF8ToUTF16((unsigned char *) basename,&length);
if (utf16 != (wchar_t *) NULL)
{
(void) FormatLocaleString(buffer,MagickPathExtent,"/Title (\xfe\xff");
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) length; i++)
(void) WriteBlobMSBShort(image,(unsigned short) utf16[i]);
(void) FormatLocaleString(buffer,MagickPathExtent,")\n");
utf16=(wchar_t *) RelinquishMagickMemory(utf16);
}
}
(void) WriteBlobString(image,buffer);
seconds=time((time_t *) NULL);
#if defined(MAGICKCORE_HAVE_LOCALTIME_R)
(void) localtime_r(&seconds,&local_time);
#else
(void) memcpy(&local_time,localtime(&seconds),sizeof(local_time));
#endif
(void) FormatLocaleString(date,MagickPathExtent,"D:%04d%02d%02d%02d%02d%02d",
local_time.tm_year+1900,local_time.tm_mon+1,local_time.tm_mday,
local_time.tm_hour,local_time.tm_min,local_time.tm_sec);
(void) FormatLocaleString(buffer,MagickPathExtent,"/CreationDate (%s)\n",
date);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/ModDate (%s)\n",date);
(void) WriteBlobString(image,buffer);
url=(char *) MagickAuthoritativeURL;
escape=EscapeParenthesis(url);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Producer (%s)\n",escape);
escape=DestroyString(escape);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"endobj\n");
/*
Write Xref object.
*/
offset=TellBlob(image)-xref[0]+
(LocaleCompare(image_info->magick,"PDFA") == 0 ? 6 : 0)+10;
(void) WriteBlobString(image,"xref\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"0 %.20g\n",(double)
object+1);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"0000000000 65535 f \n");
for (i=0; i < (ssize_t) object; i++)
{
(void) FormatLocaleString(buffer,MagickPathExtent,"%010lu 00000 n \n",
(unsigned long) xref[i]);
(void) WriteBlobString(image,buffer);
}
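/*
Each entry above is a fixed-width line; the emitted cross-reference
section resembles (offsets illustrative):

  xref
  0 12
  0000000000 65535 f
  0000000015 00000 n
  ...
*/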
(void) WriteBlobString(image,"trailer\n");
(void) WriteBlobString(image,"<<\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"/Size %.20g\n",(double)
object+1);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Info %.20g 0 R\n",(double)
info_id);
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MagickPathExtent,"/Root %.20g 0 R\n",(double)
root_id);
(void) WriteBlobString(image,buffer);
(void) SignatureImage(image,exception);
(void) FormatLocaleString(buffer,MagickPathExtent,"/ID [<%s> <%s>]\n",
GetImageProperty(image,"signature",exception),
GetImageProperty(image,"signature",exception));
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,">>\n");
(void) WriteBlobString(image,"startxref\n");
(void) FormatLocaleString(buffer,MagickPathExtent,"%.20g\n",(double) offset);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"%%EOF\n");
xref=(MagickOffsetType *) RelinquishMagickMemory(xref);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_1416_0
crossvul-cpp_data_good_1845_0 |
/*
*
* Copyright (C) 2011 Novell Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/namei.h>
#include "overlayfs.h"
#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
int ovl_copy_xattr(struct dentry *old, struct dentry *new)
{
ssize_t list_size, size;
char *buf, *name, *value;
int error;
if (!old->d_inode->i_op->getxattr ||
!new->d_inode->i_op->getxattr)
return 0;
list_size = vfs_listxattr(old, NULL, 0);
if (list_size <= 0) {
if (list_size == -EOPNOTSUPP)
return 0;
return list_size;
}
buf = kzalloc(list_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
error = -ENOMEM;
value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
if (!value)
goto out;
list_size = vfs_listxattr(old, buf, list_size);
if (list_size <= 0) {
error = list_size;
goto out_free_value;
}
for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
if (size <= 0) {
error = size;
goto out_free_value;
}
error = vfs_setxattr(new, name, value, size, 0);
if (error)
goto out_free_value;
}
out_free_value:
kfree(value);
out:
kfree(buf);
return error;
}
static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
{
struct file *old_file;
struct file *new_file;
loff_t old_pos = 0;
loff_t new_pos = 0;
int error = 0;
if (len == 0)
return 0;
old_file = ovl_path_open(old, O_LARGEFILE | O_RDONLY);
if (IS_ERR(old_file))
return PTR_ERR(old_file);
new_file = ovl_path_open(new, O_LARGEFILE | O_WRONLY);
if (IS_ERR(new_file)) {
error = PTR_ERR(new_file);
goto out_fput;
}
/* FIXME: copy up sparse files efficiently */
while (len) {
size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
long bytes;
if (len < this_len)
this_len = len;
if (signal_pending_state(TASK_KILLABLE, current)) {
error = -EINTR;
break;
}
bytes = do_splice_direct(old_file, &old_pos,
new_file, &new_pos,
this_len, SPLICE_F_MOVE);
if (bytes <= 0) {
error = bytes;
break;
}
WARN_ON(old_pos != new_pos);
len -= bytes;
}
fput(new_file);
out_fput:
fput(old_file);
return error;
}
static char *ovl_read_symlink(struct dentry *realdentry)
{
int res;
char *buf;
struct inode *inode = realdentry->d_inode;
mm_segment_t old_fs;
res = -EINVAL;
if (!inode->i_op->readlink)
goto err;
res = -ENOMEM;
buf = (char *) __get_free_page(GFP_KERNEL);
if (!buf)
goto err;
old_fs = get_fs();
set_fs(get_ds());
/* The cast to a user pointer is valid due to the set_fs() */
res = inode->i_op->readlink(realdentry,
(char __user *)buf, PAGE_SIZE - 1);
set_fs(old_fs);
if (res < 0) {
free_page((unsigned long) buf);
goto err;
}
buf[res] = '\0';
return buf;
err:
return ERR_PTR(res);
}
static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
{
struct iattr attr = {
.ia_valid =
ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
.ia_atime = stat->atime,
.ia_mtime = stat->mtime,
};
return notify_change(upperdentry, &attr, NULL);
}
int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
{
int err = 0;
if (!S_ISLNK(stat->mode)) {
struct iattr attr = {
.ia_valid = ATTR_MODE,
.ia_mode = stat->mode,
};
err = notify_change(upperdentry, &attr, NULL);
}
if (!err) {
struct iattr attr = {
.ia_valid = ATTR_UID | ATTR_GID,
.ia_uid = stat->uid,
.ia_gid = stat->gid,
};
err = notify_change(upperdentry, &attr, NULL);
}
if (!err)
ovl_set_timestamps(upperdentry, stat);
return err;
}
static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
struct dentry *dentry, struct path *lowerpath,
struct kstat *stat, struct iattr *attr,
const char *link)
{
struct inode *wdir = workdir->d_inode;
struct inode *udir = upperdir->d_inode;
struct dentry *newdentry = NULL;
struct dentry *upper = NULL;
umode_t mode = stat->mode;
int err;
newdentry = ovl_lookup_temp(workdir, dentry);
err = PTR_ERR(newdentry);
if (IS_ERR(newdentry))
goto out;
upper = lookup_one_len(dentry->d_name.name, upperdir,
dentry->d_name.len);
err = PTR_ERR(upper);
if (IS_ERR(upper))
goto out1;
/* Can't properly set mode on creation because of the umask */
stat->mode &= S_IFMT;
err = ovl_create_real(wdir, newdentry, stat, link, NULL, true);
stat->mode = mode;
if (err)
goto out2;
if (S_ISREG(stat->mode)) {
struct path upperpath;
ovl_path_upper(dentry, &upperpath);
BUG_ON(upperpath.dentry != NULL);
upperpath.dentry = newdentry;
err = ovl_copy_up_data(lowerpath, &upperpath, stat->size);
if (err)
goto out_cleanup;
}
err = ovl_copy_xattr(lowerpath->dentry, newdentry);
if (err)
goto out_cleanup;
mutex_lock(&newdentry->d_inode->i_mutex);
err = ovl_set_attr(newdentry, stat);
if (!err && attr)
err = notify_change(newdentry, attr, NULL);
mutex_unlock(&newdentry->d_inode->i_mutex);
if (err)
goto out_cleanup;
err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
if (err)
goto out_cleanup;
ovl_dentry_update(dentry, newdentry);
newdentry = NULL;
/*
* Non-directories become opaque when copied up.
*/
if (!S_ISDIR(stat->mode))
ovl_dentry_set_opaque(dentry, true);
out2:
dput(upper);
out1:
dput(newdentry);
out:
return err;
out_cleanup:
ovl_cleanup(wdir, newdentry);
goto out2;
}
/*
* Copy up a single dentry
*
* Directory renames are only allowed on "pure upper" (already created on
* upper filesystem, never copied up). Directories which are on lower or
* are merged may not be renamed. For these -EXDEV is returned and
* userspace has to deal with it. This means, when copying up a
* directory we can rely on it and ancestors being stable.
*
* Non-directory renames start with copy up of source if necessary. The
* actual rename will only proceed once the copy up was successful. Copy
* up uses upper parent i_mutex for exclusion. Since rename can change
* d_parent it is possible that the copy up will lock the old parent. At
* that point the file will have already been copied up anyway.
*/
int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
struct path *lowerpath, struct kstat *stat,
struct iattr *attr)
{
struct dentry *workdir = ovl_workdir(dentry);
int err;
struct kstat pstat;
struct path parentpath;
struct dentry *upperdir;
struct dentry *upperdentry;
const struct cred *old_cred;
struct cred *override_cred;
char *link = NULL;
if (WARN_ON(!workdir))
return -EROFS;
ovl_path_upper(parent, &parentpath);
upperdir = parentpath.dentry;
err = vfs_getattr(&parentpath, &pstat);
if (err)
return err;
if (S_ISLNK(stat->mode)) {
link = ovl_read_symlink(lowerpath->dentry);
if (IS_ERR(link))
return PTR_ERR(link);
}
err = -ENOMEM;
override_cred = prepare_creds();
if (!override_cred)
goto out_free_link;
override_cred->fsuid = stat->uid;
override_cred->fsgid = stat->gid;
/*
* CAP_SYS_ADMIN for copying up extended attributes
* CAP_DAC_OVERRIDE for create
* CAP_FOWNER for chmod, timestamp update
* CAP_FSETID for chmod
* CAP_CHOWN for chown
* CAP_MKNOD for mknod
*/
cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
cap_raise(override_cred->cap_effective, CAP_FOWNER);
cap_raise(override_cred->cap_effective, CAP_FSETID);
cap_raise(override_cred->cap_effective, CAP_CHOWN);
cap_raise(override_cred->cap_effective, CAP_MKNOD);
old_cred = override_creds(override_cred);
err = -EIO;
if (lock_rename(workdir, upperdir) != NULL) {
pr_err("overlayfs: failed to lock workdir+upperdir\n");
goto out_unlock;
}
upperdentry = ovl_dentry_upper(dentry);
if (upperdentry) {
unlock_rename(workdir, upperdir);
err = 0;
/* Raced with another copy-up? Do the setattr here */
if (attr) {
mutex_lock(&upperdentry->d_inode->i_mutex);
err = notify_change(upperdentry, attr, NULL);
mutex_unlock(&upperdentry->d_inode->i_mutex);
}
goto out_put_cred;
}
err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
stat, attr, link);
if (!err) {
/* Restore timestamps on parent (best effort) */
ovl_set_timestamps(upperdir, &pstat);
}
out_unlock:
unlock_rename(workdir, upperdir);
out_put_cred:
revert_creds(old_cred);
put_cred(override_cred);
out_free_link:
if (link)
free_page((unsigned long) link);
return err;
}
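/*
* A hypothetical userspace sketch of the -EXDEV contract described in the
* comment above ovl_copy_up_one() (helper names are illustrative, not
* part of this file):
*
*	if (rename(oldpath, newpath) == -1 && errno == EXDEV) {
*		copy_tree(oldpath, newpath);
*		remove_tree(oldpath);
*	}
*/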
int ovl_copy_up(struct dentry *dentry)
{
int err;
err = 0;
while (!err) {
struct dentry *next;
struct dentry *parent;
struct path lowerpath;
struct kstat stat;
enum ovl_path_type type = ovl_path_type(dentry);
if (OVL_TYPE_UPPER(type))
break;
next = dget(dentry);
/* find the topmost dentry not yet copied up */
for (;;) {
parent = dget_parent(next);
type = ovl_path_type(parent);
if (OVL_TYPE_UPPER(type))
break;
dput(next);
next = parent;
}
ovl_path_lower(next, &lowerpath);
err = vfs_getattr(&lowerpath, &stat);
if (!err)
err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL);
dput(parent);
dput(next);
}
return err;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_1845_0
crossvul-cpp_data_good_3656_0 |
/*
* Generic hugetlb support.
* (C) William Irwin, April 2004
*/
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
__initdata LIST_HEAD(huge_boot_pages);
/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
/*
* Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
*/
static DEFINE_SPINLOCK(hugetlb_lock);
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
bool free = (spool->count == 0) && (spool->used_hpages == 0);
spin_unlock(&spool->lock);
/* If no pages are used and no other handles to the subpool
* remain, free the subpool. */
if (free)
kfree(spool);
}
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
struct hugepage_subpool *spool;
spool = kmalloc(sizeof(*spool), GFP_KERNEL);
if (!spool)
return NULL;
spin_lock_init(&spool->lock);
spool->count = 1;
spool->max_hpages = nr_blocks;
spool->used_hpages = 0;
return spool;
}
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
spin_lock(&spool->lock);
BUG_ON(!spool->count);
spool->count--;
unlock_or_release_subpool(spool);
}
static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
{
int ret = 0;
if (!spool)
return 0;
spin_lock(&spool->lock);
if ((spool->used_hpages + delta) <= spool->max_hpages) {
spool->used_hpages += delta;
} else {
ret = -ENOMEM;
}
spin_unlock(&spool->lock);
return ret;
}
static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta)
{
if (!spool)
return;
spin_lock(&spool->lock);
spool->used_hpages -= delta;
/* If hugetlbfs_put_super couldn't free spool due to
* an outstanding quota reference, free it now. */
unlock_or_release_subpool(spool);
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
return HUGETLBFS_SB(inode->i_sb)->spool;
}
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
return subpool_inode(vma->vm_file->f_dentry->d_inode);
}
/*
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
*
* The region data structures are protected by a combination of the mmap_sem
* and the hugetlb_instantiation_mutex. To access or modify a region the caller
* must either hold the mmap_sem for write, or the mmap_sem for read and
* the hugetlb_instantiation mutex:
*
* down_write(&mm->mmap_sem);
* or
* down_read(&mm->mmap_sem);
* mutex_lock(&hugetlb_instantiation_mutex);
*/
struct file_region {
struct list_head link;
long from;
long to;
};
static long region_add(struct list_head *head, long f, long t)
{
struct file_region *rg, *nrg, *trg;
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
/* Check for and consume any regions we now overlap with. */
nrg = rg;
list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
break;
/* If this area reaches higher, then extend our area to
* include it completely. If this is not the first area
* which we intend to reuse, free it. */
if (rg->to > t)
t = rg->to;
if (rg != nrg) {
list_del(&rg->link);
kfree(rg);
}
}
nrg->from = f;
nrg->to = t;
return 0;
}
static long region_chg(struct list_head *head, long f, long t)
{
struct file_region *rg, *nrg;
long chg = 0;
/* Locate the region we are before or in. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/* If we are below the current region then a new region is required.
* Subtle: allocate a new region at the position but make it zero
* size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
if (!nrg)
return -ENOMEM;
nrg->from = f;
nrg->to = f;
INIT_LIST_HEAD(&nrg->link);
list_add(&nrg->link, rg->link.prev);
return t - f;
}
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
chg = t - f;
/* Check for and consume any regions we now overlap with. */
list_for_each_entry(rg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
return chg;
/* We overlap with this area; if it extends further than
* us then we must extend ourselves. Account for its
* existing reservation. */
if (rg->to > t) {
chg += rg->to - t;
t = rg->to;
}
chg -= rg->to - rg->from;
}
return chg;
}
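/*
* Worked example (hypothetical): with a single existing region [0,4),
* region_chg(head, 2, 6) rounds its left edge down to 0, counts [4,6) as
* the only new span and returns 2; a subsequent region_add(head, 2, 6)
* then merges everything into one region [0,6).
*/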
static long region_truncate(struct list_head *head, long end)
{
struct file_region *rg, *trg;
long chg = 0;
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (end <= rg->to)
break;
if (&rg->link == head)
return 0;
/* If we are in the middle of a region then adjust it. */
if (end > rg->from) {
chg = rg->to - end;
rg->to = end;
rg = list_entry(rg->link.next, typeof(*rg), link);
}
/* Drop any remaining regions. */
list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
if (&rg->link == head)
break;
chg += rg->to - rg->from;
list_del(&rg->link);
kfree(rg);
}
return chg;
}
static long region_count(struct list_head *head, long f, long t)
{
struct file_region *rg;
long chg = 0;
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
long seg_from;
long seg_to;
if (rg->to <= f)
continue;
if (rg->from >= t)
break;
seg_from = max(rg->from, f);
seg_to = min(rg->to, t);
chg += seg_to - seg_from;
}
return chg;
}
/*
* Convert the address within this vma to the page offset within
* the mapping, in pagecache page units; huge pages here.
*/
static pgoff_t vma_hugecache_offset(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
return ((address - vma->vm_start) >> huge_page_shift(h)) +
(vma->vm_pgoff >> huge_page_order(h));
}
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address)
{
return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
/*
* Return the size of the pages allocated when backing a VMA. In the majority
* of cases this will be the same size as that used by the page table entries.
*/
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
struct hstate *hstate;
if (!is_vm_hugetlb_page(vma))
return PAGE_SIZE;
hstate = hstate_vma(vma);
return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
* Return the page size being used by the MMU to back a VMA. In the majority
* of cases, the page size used by the kernel matches the MMU size. On
* architectures where it differs, an architecture-specific version of this
* function is required.
*/
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
return vma_kernel_pagesize(vma);
}
#endif
/*
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
* bits of the reservation map pointer, which are always clear due to
* alignment.
*/
#define HPAGE_RESV_OWNER (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
/*
* These helpers are used to track how many pages are reserved for
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
* is guaranteed to have its future faults succeed.
*
* With the exception of reset_vma_resv_huge_pages() which is called at fork(),
* the reserve counters are updated with the hugetlb_lock held. It is safe
* to reset the VMA at fork() time as it is not in use yet and there is no
* chance of the global counters getting corrupted as a result of the values.
*
* The private mapping reservation is represented in a subtly different
* manner to a shared mapping. A shared mapping has a region map associated
* with the underlying file, this region map represents the backing file
* pages which have ever had a reservation assigned; this persists even
* after the page is instantiated. A private mapping has a region map
* associated with the original mmap which is attached to all VMAs which
* reference it, this region map represents those offsets which have consumed
* reservation, i.e. where pages have been instantiated.
*/
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
return (unsigned long)vma->vm_private_data;
}
static void set_vma_private_data(struct vm_area_struct *vma,
unsigned long value)
{
vma->vm_private_data = (void *)value;
}
struct resv_map {
struct kref refs;
struct list_head regions;
};
static struct resv_map *resv_map_alloc(void)
{
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
if (!resv_map)
return NULL;
kref_init(&resv_map->refs);
INIT_LIST_HEAD(&resv_map->regions);
return resv_map;
}
static void resv_map_release(struct kref *ref)
{
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
/* Clear out any active regions before we release the map. */
region_truncate(&resv_map->regions, 0);
kfree(resv_map);
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
if (!(vma->vm_flags & VM_MAYSHARE))
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
return NULL;
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, (get_vma_private_data(vma) &
HPAGE_RESV_MASK) | (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
return (get_vma_private_data(vma) & flag) != 0;
}
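/*
* Illustration (addresses hypothetical): a kmalloc'd resv_map is at least
* word aligned, so bits 0-1 of its address are free for flags. With the
* map at 0xffff880012345000, setting HPAGE_RESV_OWNER stores
* 0xffff880012345001 in vm_private_data; vma_resv_map() masks with
* ~HPAGE_RESV_MASK to recover the original pointer.
*/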
/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_NORESERVE)
return;
if (vma->vm_flags & VM_MAYSHARE) {
/* Shared mappings always use reserves */
h->resv_huge_pages--;
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
/*
* Only the process that called mmap() has reserves for
* private mappings.
*/
h->resv_huge_pages--;
}
}
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
if (!(vma->vm_flags & VM_MAYSHARE))
vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_MAYSHARE)
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
return 0;
}
static void copy_gigantic_page(struct page *dst, struct page *src)
{
int i;
struct hstate *h = page_hstate(src);
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < pages_per_huge_page(h); ) {
cond_resched();
copy_highpage(dst, src);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
void copy_huge_page(struct page *dst, struct page *src)
{
int i;
struct hstate *h = page_hstate(src);
if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
copy_gigantic_page(dst, src);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page(h); i++) {
cond_resched();
copy_highpage(dst + i, src + i);
}
}
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
list_add(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
}
static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
if (list_empty(&h->hugepage_freelists[nid]))
return NULL;
page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
list_del(&page->lru);
set_page_refcounted(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
}
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
{
struct page *page = NULL;
struct mempolicy *mpol;
nodemask_t *nodemask;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
unsigned int cpuset_mems_cookie;
retry_cpuset:
cpuset_mems_cookie = get_mems_allowed();
zonelist = huge_zonelist(vma, address,
htlb_alloc_mask, &mpol, &nodemask);
/*
* A child process with MAP_PRIVATE mappings created by its parent
* has no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed.
*/
if (!vma_has_reserves(vma) &&
h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
/* If reserves cannot be used, ensure enough pages are in the pool */
if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
if (!avoid_reserve)
decrement_hugepage_resv_vma(h, vma);
break;
}
}
}
mpol_cond_put(mpol);
if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
goto retry_cpuset;
return page;
err:
mpol_cond_put(mpol);
return NULL;
}
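/*
* Remove a huge page from the hstate accounting, clear its subpage
* flags and return it to the buddy allocator. Called with
* hugetlb_lock held.
*/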
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
VM_BUG_ON(h->order >= MAX_ORDER);
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
}
set_compound_page_dtor(page, NULL);
set_page_refcounted(page);
arch_release_hugepage(page);
__free_pages(page, huge_page_order(h));
}
struct hstate *size_to_hstate(unsigned long size)
{
struct hstate *h;
for_each_hstate(h) {
if (huge_page_size(h) == size)
return h;
}
return NULL;
}
static void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
* compound page destructor.
*/
struct hstate *h = page_hstate(page);
int nid = page_to_nid(page);
struct hugepage_subpool *spool =
(struct hugepage_subpool *)page_private(page);
set_page_private(page, 0);
page->mapping = NULL;
BUG_ON(page_count(page));
BUG_ON(page_mapcount(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
update_and_free_page(h, page);
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
} else {
enqueue_huge_page(h, page);
}
spin_unlock(&hugetlb_lock);
hugepage_subpool_put_pages(spool, 1);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
set_compound_page_dtor(page, free_huge_page);
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
put_page(page); /* free it into the hugepage allocator */
}
static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
struct page *p = page + 1;
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
}
}
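/*
* PageHuge() only returns true for hugetlbfs pages, recognised by
* their free_huge_page compound destructor; normal and transparent
* huge pages return false.
*/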
int PageHuge(struct page *page)
{
compound_page_dtor *dtor;
if (!PageCompound(page))
return 0;
page = compound_head(page);
dtor = get_compound_page_dtor(page);
return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
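/*
* Allocate a fresh huge page on the given node and add it to the
* pool. Gigantic (>= MAX_ORDER) pages cannot be allocated from the
* buddy allocator at runtime, so return NULL for those.
*/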
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
if (h->order >= MAX_ORDER)
return NULL;
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
if (page) {
if (arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
return NULL;
}
prep_new_huge_page(h, page, nid);
}
return page;
}
/*
* common helper functions for hstate_next_node_to_{alloc|free}.
* We may have allocated or freed a huge page based on a different
* nodes_allowed previously, so h->next_node_to_{alloc|free} might
* be outside of *nodes_allowed. Ensure that we use an allowed
* node for alloc or free.
*/
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
nid = next_node(nid, *nodes_allowed);
if (nid == MAX_NUMNODES)
nid = first_node(*nodes_allowed);
VM_BUG_ON(nid >= MAX_NUMNODES);
return nid;
}
static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
if (!node_isset(nid, *nodes_allowed))
nid = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* returns the previously saved node ["this node"] from which to
* allocate a persistent huge page for the pool and advance the
* next node from which to allocate, handling wrap at end of node
* mask.
*/
static int hstate_next_node_to_alloc(struct hstate *h,
nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
return nid;
}
static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
next_nid = start_nid;
do {
page = alloc_fresh_huge_page_node(h, next_nid);
if (page) {
ret = 1;
break;
}
next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
} while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
else
count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
return ret;
}
/*
* helper for free_pool_huge_page() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
* next node id whether or not we find a free huge page to free so
* that the next attempt to free addresses the next node.
*/
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* Free a huge page from the pool, starting at the next node to free.
* Attempt to keep persistent huge pages more or less
* balanced over allowed nodes.
* Called with hugetlb_lock locked.
*/
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
bool acct_surplus)
{
int start_nid;
int next_nid;
int ret = 0;
start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
/*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
!list_empty(&h->hugepage_freelists[next_nid])) {
struct page *page =
list_entry(h->hugepage_freelists[next_nid].next,
struct page, lru);
list_del(&page->lru);
h->free_huge_pages--;
h->free_huge_pages_node[next_nid]--;
if (acct_surplus) {
h->surplus_huge_pages--;
h->surplus_huge_pages_node[next_nid]--;
}
update_and_free_page(h, page);
ret = 1;
break;
}
next_nid = hstate_next_node_to_free(h, nodes_allowed);
} while (next_nid != start_nid);
return ret;
}
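/*
* Allocate a surplus huge page directly from the buddy allocator,
* subject to the nr_overcommit_huge_pages limit. nid may be
* NUMA_NO_NODE to allow any node.
*/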
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
struct page *page;
unsigned int r_nid;
if (h->order >= MAX_ORDER)
return NULL;
/*
* Assume we will successfully allocate the surplus page to
* prevent racing processes from causing the surplus to exceed
* overcommit
*
* This however introduces a different race, where a process B
* tries to grow the static hugepage pool while alloc_pages() is
* called by process A. B will only examine the per-node
* counters in determining if surplus huge pages can be
* converted to normal huge pages in adjust_pool_surplus(). A
* won't be able to increment the per-node counter, until the
* lock is dropped by B, but B doesn't drop hugetlb_lock until
* no more huge pages can be converted from surplus to normal
* state (and doesn't try to convert again). Thus, we have a
* case where a surplus huge page exists, the pool is grown, and
* the surplus huge page still exists after, even though it
* should just have been converted to a normal huge page. This
* does not leak memory, though, as the hugepage will be freed
* once it is out of use. It also does not allow the counters to
* go out of whack in adjust_pool_surplus() as we don't modify
* the node values until we've gotten the hugepage and only the
* per-node value is checked there.
*/
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
spin_unlock(&hugetlb_lock);
return NULL;
} else {
h->nr_huge_pages++;
h->surplus_huge_pages++;
}
spin_unlock(&hugetlb_lock);
if (nid == NUMA_NO_NODE)
page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
else
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
if (page && arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
page = NULL;
}
spin_lock(&hugetlb_lock);
if (page) {
r_nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
/*
* We incremented the global counters already
*/
h->nr_huge_pages_node[r_nid]++;
h->surplus_huge_pages_node[r_nid]++;
__count_vm_event(HTLB_BUDDY_PGALLOC);
} else {
h->nr_huge_pages--;
h->surplus_huge_pages--;
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
}
spin_unlock(&hugetlb_lock);
return page;
}
/*
* This allocation function is useful in the context where vma is irrelevant.
* E.g. soft-offlining uses this function because it only cares about the
* physical address of the error page.
*/
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_node(h, nid);
spin_unlock(&hugetlb_lock);
if (!page)
page = alloc_buddy_huge_page(h, nid);
return page;
}
/*
* Increase the hugetlb pool such that it can accommodate a reservation
* of size 'delta'.
*/
static int gather_surplus_pages(struct hstate *h, int delta)
{
struct list_head surplus_list;
struct page *page, *tmp;
int ret, i;
int needed, allocated;
bool alloc_ok = true;
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
if (needed <= 0) {
h->resv_huge_pages += delta;
return 0;
}
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
alloc_ok = false;
break;
}
list_add(&page->lru, &surplus_list);
}
allocated += i;
/*
* After retaking hugetlb_lock, we need to recalculate 'needed'
* because either resv_huge_pages or free_huge_pages may have changed.
*/
spin_lock(&hugetlb_lock);
needed = (h->resv_huge_pages + delta) -
(h->free_huge_pages + allocated);
if (needed > 0) {
if (alloc_ok)
goto retry;
/*
* We were not able to allocate enough pages to
* satisfy the entire reservation so we free what
* we've allocated so far.
*/
goto free;
}
/*
* The surplus_list now contains _at_least_ the number of extra pages
* needed to accommodate the reservation. Add the appropriate number
* of pages to the hugetlb pool and free the extras back to the buddy
* allocator. Commit the entire reservation here to prevent another
* process from stealing the pages as they are added to the pool but
* before they are reserved.
*/
needed += allocated;
h->resv_huge_pages += delta;
ret = 0;
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
list_del(&page->lru);
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
put_page_testzero(page);
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
if (!list_empty(&surplus_list)) {
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
list_del(&page->lru);
put_page(page);
}
}
spin_lock(&hugetlb_lock);
return ret;
}
/*
* When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were
* never used.
* Called with hugetlb_lock held.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
/* Cannot return gigantic pages currently */
if (h->order >= MAX_ORDER)
return;
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
* We want to release as many surplus pages as possible, spread
* evenly across all nodes with memory. Iterate across these nodes
* until we can no longer free unreserved surplus pages. This occurs
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
}
}
/*
* Determine if the huge page at addr within the vma has an associated
* reservation. Where it does not we will need to logically increase
* reservation and actually increase subpool usage before an allocation
* can occur. Where any new reservation would be required the
* reservation change is prepared, but not committed. Once the page
* has been allocated from the subpool and instantiated the change should
* be committed via vma_commit_reservation. No action is required on
* failure.
*/
static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
return region_chg(&inode->i_mapping->private_list,
idx, idx + 1);
} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
return 1;
} else {
long err;
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
err = region_chg(&reservations->regions, idx, idx + 1);
if (err < 0)
return err;
return 0;
}
}
static void vma_commit_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
region_add(&inode->i_mapping->private_list, idx, idx + 1);
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
/* Mark this page used in the map. */
region_add(&reservations->regions, idx, idx + 1);
}
}
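/*
* Allocate a huge page for the given VMA and address: charge the
* subpool where no reservation covers the page, try the free lists
* first and fall back to a surplus page from the buddy allocator.
*/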
static struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma);
struct page *page;
long chg;
/*
* Processes that did not create the mapping will have no
* reserves and will not have accounted against subpool
* limit. Check that the subpool limit can be made before
* satisfying the allocation. MAP_NORESERVE mappings may also
* need pages and subpool limit allocated if no reserve
* mapping overlaps.
*/
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
return ERR_PTR(-VM_FAULT_OOM);
if (chg)
if (hugepage_subpool_get_pages(spool, chg))
return ERR_PTR(-VM_FAULT_SIGBUS);
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
spin_unlock(&hugetlb_lock);
if (!page) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugepage_subpool_put_pages(spool, chg);
return ERR_PTR(-VM_FAULT_SIGBUS);
}
}
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
return page;
}
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
NODE_DATA(hstate_next_node_to_alloc(h,
&node_states[N_HIGH_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
/*
* Use the beginning of the huge page to store the
* huge_bootmem_page struct (until gather_bootmem
* puts them into the mem_map).
*/
m = addr;
goto found;
}
nr_nodes--;
}
return 0;
found:
BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
/* Put them into a private list first because mem_map is not up yet */
list_add(&m->list, &huge_boot_pages);
m->hstate = h;
return 1;
}
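/*
* Gigantic pages may have a discontiguous mem_map, so they need the
* mem_map_next()-aware variant of compound-page setup.
*/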
static void prep_compound_huge_page(struct page *page, int order)
{
if (unlikely(order > (MAX_ORDER - 1)))
prep_compound_gigantic_page(page, order);
else
prep_compound_page(page, order);
}
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
struct huge_bootmem_page *m;
list_for_each_entry(m, &huge_boot_pages, list) {
struct hstate *h = m->hstate;
struct page *page;
#ifdef CONFIG_HIGHMEM
page = pfn_to_page(m->phys >> PAGE_SHIFT);
free_bootmem_late((unsigned long)m,
sizeof(struct huge_bootmem_page));
#else
page = virt_to_page(m);
#endif
__ClearPageReserved(page);
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
prep_new_huge_page(h, page, page_to_nid(page));
/*
* If we had gigantic hugepages allocated at boot time, we need
* to restore the 'stolen' pages to totalram_pages in order to
* fix confusing memory reports from free(1) and other
* side-effects, like CommitLimit going negative.
*/
if (h->order > (MAX_ORDER - 1))
totalram_pages += 1 << h->order;
}
}
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long i;
for (i = 0; i < h->max_huge_pages; ++i) {
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
} else if (!alloc_fresh_huge_page(h,
&node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
}
static void __init hugetlb_init_hstates(void)
{
struct hstate *h;
for_each_hstate(h) {
/* oversize hugepages were init'ed in early boot */
if (h->order < MAX_ORDER)
hugetlb_hstate_alloc_pages(h);
}
}
static char * __init memfmt(char *buf, unsigned long n)
{
if (n >= (1UL << 30))
sprintf(buf, "%lu GB", n >> 30);
else if (n >= (1UL << 20))
sprintf(buf, "%lu MB", n >> 20);
else
sprintf(buf, "%lu KB", n >> 10);
return buf;
}
static void __init report_hugepages(void)
{
struct hstate *h;
for_each_hstate(h) {
char buf[32];
printk(KERN_INFO "HugeTLB registered %s page size, "
"pre-allocated %ld pages\n",
memfmt(buf, huge_page_size(h)),
h->free_huge_pages);
}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
if (count >= h->nr_huge_pages)
return;
if (PageHighMem(page))
continue;
list_del(&page->lru);
update_and_free_page(h, page);
h->free_huge_pages--;
h->free_huge_pages_node[page_to_nid(page)]--;
}
}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
}
#endif
/*
* Increment or decrement surplus_huge_pages. Keep node-specific counters
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{
int start_nid, next_nid;
int ret = 0;
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
else
start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
int nid = next_nid;
if (delta < 0) {
/*
* To shrink on this node, there must be a surplus page
*/
if (!h->surplus_huge_pages_node[nid]) {
next_nid = hstate_next_node_to_alloc(h,
nodes_allowed);
continue;
}
}
if (delta > 0) {
/*
* Surplus cannot exceed the total number of pages
*/
if (h->surplus_huge_pages_node[nid] >=
h->nr_huge_pages_node[nid]) {
next_nid = hstate_next_node_to_free(h,
nodes_allowed);
continue;
}
}
h->surplus_huge_pages += delta;
h->surplus_huge_pages_node[nid] += delta;
ret = 1;
break;
} while (next_nid != start_nid);
return ret;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
* We might race with alloc_buddy_huge_page() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
* within all the constraints specified by the sysctls.
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
while (count > persistent_huge_pages(h)) {
/*
* If this allocation races such that we no longer need the
* page, free_huge_page will handle it by freeing the page
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
/* Bail for signals. Probably ctrl-c from user */
if (signal_pending(current))
goto out;
}
/*
* Decrease the pool size
* First return free pages to the buddy allocator (being careful
* to keep enough around to satisfy reservations). Then place
* pages into surplus state as needed so the pool will shrink
* to the desired size as pages become free.
*
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
* alloc_buddy_huge_page() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
return ret;
}
#define HSTATE_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define HSTATE_ATTR(_name) \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = NUMA_NO_NODE;
return &hstates[i];
}
return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long nr_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
nr_huge_pages = h->nr_huge_pages;
else
nr_huge_pages = h->nr_huge_pages_node[nid];
return sprintf(buf, "%lu\n", nr_huge_pages);
}
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
int err;
int nid;
unsigned long count;
struct hstate *h;
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
err = strict_strtoul(buf, 10, &count);
if (err)
goto out;
h = kobj_to_hstate(kobj, &nid);
if (h->order >= MAX_ORDER) {
err = -EINVAL;
goto out;
}
if (nid == NUMA_NO_NODE) {
/*
* global hstate attribute
*/
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
nodes_allowed = &node_states[N_HIGH_MEMORY];
}
} else if (nodes_allowed) {
/*
* per node hstate attribute: adjust count to global,
* but restrict alloc/free to the specified node.
*/
count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
init_nodemask_of_node(nodes_allowed, nid);
} else
nodes_allowed = &node_states[N_HIGH_MEMORY];
h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
if (nodes_allowed != &node_states[N_HIGH_MEMORY])
NODEMASK_FREE(nodes_allowed);
return len;
out:
NODEMASK_FREE(nodes_allowed);
return err;
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
#ifdef CONFIG_NUMA
/*
* hstate attribute for optionally mempolicy-based constraint on persistent
* huge page alloc/free.
*/
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);
if (h->order >= MAX_ORDER)
return -EINVAL;
err = strict_strtoul(buf, 10, &input);
if (err)
return err;
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = input;
spin_unlock(&hugetlb_lock);
return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long free_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
free_huge_pages = h->free_huge_pages;
else
free_huge_pages = h->free_huge_pages_node[nid];
return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long surplus_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
surplus_huge_pages = h->surplus_huge_pages;
else
surplus_huge_pages = h->surplus_huge_pages_node[nid];
return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
static struct attribute *hstate_attrs[] = {
&nr_hugepages_attr.attr,
&nr_overcommit_hugepages_attr.attr,
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
&nr_hugepages_mempolicy_attr.attr,
#endif
NULL,
};
static struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
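/* Create the sysfs kobject and attribute group for a single hstate. */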
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
struct attribute_group *hstate_attr_group)
{
int retval;
int hi = h - hstates;
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
kobject_put(hstate_kobjs[hi]);
return retval;
}
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
int err;
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
if (!hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
#ifdef CONFIG_NUMA
/*
* node_hstate/s - associate per node hstate attributes, via their kobjects,
* with node devices in node_devices[] using a parallel array. The array
* index of a node device or _hstate == node id.
* This is here to avoid any static dependency of the node device driver, in
* the base kernel, on the hugetlb module.
*/
struct node_hstate {
struct kobject *hugepages_kobj;
struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];
/*
* A subset of global hstate attributes for node devices
*/
static struct attribute *per_node_hstate_attrs[] = {
&nr_hugepages_attr.attr,
&free_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
NULL,
};
static struct attribute_group per_node_hstate_attr_group = {
.attrs = per_node_hstate_attrs,
};
/*
* kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
* Returns node id via non-NULL nidp.
*/
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
int nid;
for (nid = 0; nid < nr_node_ids; nid++) {
struct node_hstate *nhs = &node_hstates[nid];
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (nhs->hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = nid;
return &hstates[i];
}
}
BUG();
return NULL;
}
/*
* Unregister hstate attributes from a single node device.
* No-op if no hstate attributes attached.
*/
void hugetlb_unregister_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
if (!nhs->hugepages_kobj)
return; /* no hstate attributes */
for_each_hstate(h)
if (nhs->hstate_kobjs[h - hstates]) {
kobject_put(nhs->hstate_kobjs[h - hstates]);
nhs->hstate_kobjs[h - hstates] = NULL;
}
kobject_put(nhs->hugepages_kobj);
nhs->hugepages_kobj = NULL;
}
/*
* hugetlb module exit: unregister hstate attributes from node devices
* that have them.
*/
static void hugetlb_unregister_all_nodes(void)
{
int nid;
/*
* disable node device registrations.
*/
register_hugetlbfs_with_node(NULL, NULL);
/*
* remove hstate attributes from any nodes that have them.
*/
for (nid = 0; nid < nr_node_ids; nid++)
hugetlb_unregister_node(&node_devices[nid]);
}
/*
* Register hstate attributes for a single node device.
* No-op if attributes already registered.
*/
void hugetlb_register_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
int err;
if (nhs->hugepages_kobj)
return; /* already allocated */
nhs->hugepages_kobj = kobject_create_and_add("hugepages",
&node->dev.kobj);
if (!nhs->hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
nhs->hstate_kobjs,
&per_node_hstate_attr_group);
if (err) {
printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
" for node %d\n",
h->name, node->dev.id);
hugetlb_unregister_node(node);
break;
}
}
}
/*
* hugetlb init time: register hstate attributes for all registered node
* devices of nodes that have memory. All on-line nodes should have
* registered their associated device by this time.
*/
static void hugetlb_register_all_nodes(void)
{
int nid;
for_each_node_state(nid, N_HIGH_MEMORY) {
struct node *node = &node_devices[nid];
if (node->dev.id == nid)
hugetlb_register_node(node);
}
/*
* Let the node device driver know we're here so it can
* [un]register hstate attributes on node hotplug.
*/
register_hugetlbfs_with_node(hugetlb_register_node,
hugetlb_unregister_node);
}
#else /* !CONFIG_NUMA */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
BUG();
if (nidp)
*nidp = -1;
return NULL;
}
static void hugetlb_unregister_all_nodes(void) { }
static void hugetlb_register_all_nodes(void) { }
#endif
static void __exit hugetlb_exit(void)
{
struct hstate *h;
hugetlb_unregister_all_nodes();
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);
static int __init hugetlb_init(void)
{
/* Some platforms decide whether they support huge pages at boot
* time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
* there is no such support.
*/
if (HPAGE_SHIFT == 0)
return 0;
if (!size_to_hstate(default_hstate_size)) {
default_hstate_size = HPAGE_SIZE;
if (!size_to_hstate(default_hstate_size))
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
if (default_hstate_max_huge_pages)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
hugetlb_init_hstates();
gather_bootmem_prealloc();
report_hugepages();
hugetlb_sysfs_init();
hugetlb_register_all_nodes();
return 0;
}
module_init(hugetlb_init);
/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
struct hstate *h;
unsigned long i;
if (size_to_hstate(PAGE_SIZE << order)) {
printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
return;
}
BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
BUG_ON(order == 0);
h = &hstates[max_hstate++];
h->order = order;
h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
h->nr_huge_pages = 0;
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
parsed_hstate = h;
}
static int __init hugetlb_nrpages_setup(char *s)
{
unsigned long *mhp;
static unsigned long *last_mhp;
/*
* !max_hstate means we haven't parsed a hugepagesz= parameter yet,
* so this hugepages= parameter goes to the "default hstate".
*/
if (!max_hstate)
mhp = &default_hstate_max_huge_pages;
else
mhp = &parsed_hstate->max_huge_pages;
if (mhp == last_mhp) {
printk(KERN_WARNING "hugepages= specified twice without "
"interleaving hugepagesz=, ignoring\n");
return 1;
}
if (sscanf(s, "%lu", mhp) <= 0)
*mhp = 0;
/*
* Global state is always initialized later in hugetlb_init.
* But we need to allocate >= MAX_ORDER hstates here early to still
* use the bootmem allocator.
*/
if (max_hstate && parsed_hstate->order >= MAX_ORDER)
hugetlb_hstate_alloc_pages(parsed_hstate);
last_mhp = mhp;
return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);
static int __init hugetlb_default_setup(char *s)
{
default_hstate_size = memparse(s, &s);
return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
static unsigned int cpuset_mems_nr(unsigned int *array)
{
int node;
unsigned int nr = 0;
for_each_node_mask(node, cpuset_current_mems_allowed)
nr += array[node];
return nr;
}
#ifdef CONFIG_SYSCTL
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
tmp = h->max_huge_pages;
if (write && h->order >= MAX_ORDER)
return -EINVAL;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
NODEMASK_ALLOC(nodemask_t, nodes_allowed,
GFP_KERNEL | __GFP_NORETRY);
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
nodes_allowed = &node_states[N_HIGH_MEMORY];
}
h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
if (nodes_allowed != &node_states[N_HIGH_MEMORY])
NODEMASK_FREE(nodes_allowed);
}
out:
return ret;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(false, table, write,
buffer, length, ppos);
}
#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(true, table, write,
buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
if (hugepages_treat_as_movable)
htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
else
htlb_alloc_mask = GFP_HIGHUSER;
return 0;
}
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
tmp = h->nr_overcommit_huge_pages;
if (write && h->order >= MAX_ORDER)
return -EINVAL;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = tmp;
spin_unlock(&hugetlb_lock);
}
out:
return ret;
}
#endif /* CONFIG_SYSCTL */
void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h = &default_hstate;
seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"HugePages_Rsvd: %5lu\n"
"HugePages_Surp: %5lu\n"
"Hugepagesize: %8lu kB\n",
h->nr_huge_pages,
h->free_huge_pages,
h->resv_huge_pages,
h->surplus_huge_pages,
1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}
int hugetlb_report_node_meminfo(int nid, char *buf)
{
struct hstate *h = &default_hstate;
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
"Node %d HugePages_Free: %5u\n"
"Node %d HugePages_Surp: %5u\n",
nid, h->nr_huge_pages_node[nid],
nid, h->free_huge_pages_node[nid],
nid, h->surplus_huge_pages_node[nid]);
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
struct hstate *h = &default_hstate;
return h->nr_huge_pages * pages_per_huge_page(h);
}
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
int ret = -ENOMEM;
spin_lock(&hugetlb_lock);
/*
* When cpuset is configured, it breaks the strict hugetlb page
* reservation as the accounting is done on a global variable. Such
* reservation is completely rubbish in the presence of cpuset because
* the reservation is not checked against page availability for the
* current cpuset. An application can still potentially be OOM'ed by the
* kernel due to a lack of free hugetlb pages in the cpuset the task is in.
* Attempting to enforce strict accounting with cpuset is almost
* impossible (or too ugly) because cpusets are so fluid that
* tasks or memory nodes can be dynamically moved between them.
*
* The change of semantics for shared hugetlb mapping with cpuset is
* undesirable. However, in order to preserve some of the semantics,
* we fall back to check against current free page availability as
* a best attempt and hopefully to minimize the impact of changing
* semantics that cpuset has.
*/
if (delta > 0) {
if (gather_surplus_pages(h, delta) < 0)
goto out;
if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
return_unused_surplus_pages(h, delta);
goto out;
}
}
ret = 0;
if (delta < 0)
return_unused_surplus_pages(h, (unsigned long) -delta);
out:
spin_unlock(&hugetlb_lock);
return ret;
}
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
struct resv_map *reservations = vma_resv_map(vma);
/*
* This new VMA should share its sibling's reservation map if present.
* The VMA will only ever have a valid reservation map pointer where
* it is being copied for another still existing VMA. As that VMA
* has a reference to the reservation map it cannot disappear until
* after this open call completes. It is therefore safe to take a
* new reference here without additional locking.
*/
if (reservations)
kref_get(&reservations->refs);
}
static void resv_map_put(struct vm_area_struct *vma)
{
struct resv_map *reservations = vma_resv_map(vma);
if (!reservations)
return;
kref_put(&reservations->refs, resv_map_release);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
struct resv_map *reservations = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve;
unsigned long start;
unsigned long end;
if (reservations) {
start = vma_hugecache_offset(h, vma, vma->vm_start);
end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) -
region_count(&reservations->regions, start, end);
resv_map_put(vma);
if (reserve) {
hugetlb_acct_memory(h, -reserve);
hugepage_subpool_put_pages(spool, reserve);
}
}
}
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
BUG();
return 0;
}
const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
};
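/*
* Construct a huge PTE for the page: writable mappings get a dirty,
* writable entry, read-only mappings a write-protected one.
*/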
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
int writable)
{
pte_t entry;
if (writable) {
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
} else {
entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
entry = arch_make_huge_pte(entry, vma, page, writable);
return entry;
}
static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t entry;
entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
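/*
* Copy the hugetlb PTEs of a VMA from parent to child at fork() time,
* write-protecting both copies for private COW mappings.
*/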
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
unsigned long addr;
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
src_pte = huge_pte_offset(src, addr);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
if (!dst_pte)
goto nomem;
/* If the pagetables are shared don't copy or take references */
if (dst_pte == src_pte)
continue;
spin_lock(&dst->page_table_lock);
spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
if (!huge_pte_none(huge_ptep_get(src_pte))) {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
set_huge_pte_at(dst, addr, dst_pte, entry);
}
spin_unlock(&src->page_table_lock);
spin_unlock(&dst->page_table_lock);
}
return 0;
nomem:
return -ENOMEM;
}
static int is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return 0;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp))
return 1;
else
return 0;
}
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return 0;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_hwpoison_entry(swp))
return 1;
else
return 0;
}
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
pte_t pte;
struct page *page;
struct page *tmp;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
/*
* A page gathering list, protected by per file i_mmap_mutex. The
* lock is used to avoid list corruption from multiple unmapping
* of the same page since we are using page->lru.
*/
LIST_HEAD(page_list);
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
mmu_notifier_invalidate_range_start(mm, start, end);
spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
if (huge_pmd_unshare(mm, &address, ptep))
continue;
pte = huge_ptep_get(ptep);
if (huge_pte_none(pte))
continue;
/*
* A HWPoisoned hugepage is already unmapped and its reference dropped
*/
if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
continue;
page = pte_page(pte);
/*
* If a reference page is supplied, it is because a specific
* page is being unmapped, not a range. Ensure the page we
* are about to unmap is the actual page of interest.
*/
if (ref_page) {
if (page != ref_page)
continue;
/*
* Mark the VMA as having unmapped its page so that
* future faults in this VMA will fail rather than
* looking like data was lost
*/
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
}
pte = huge_ptep_get_and_clear(mm, address, ptep);
if (pte_dirty(pte))
set_page_dirty(page);
list_add(&page->lru, &page_list);
/* Bail out after unmapping reference page if supplied */
if (ref_page)
break;
}
flush_tlb_range(vma, start, end);
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, start, end);
list_for_each_entry_safe(page, tmp, &page_list, lru) {
page_remove_rmap(page);
list_del(&page->lru);
put_page(page);
}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
__unmap_hugepage_range(vma, start, end, ref_page);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
/*
* This is called when the original mapper is failing to COW a MAP_PRIVATE
* mapping it owns the reserve page for. The intention is to unmap the page
* from other VMAs and let the children be SIGKILLed if they are faulting the
* same region.
*/
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{
struct hstate *h = hstate_vma(vma);
struct vm_area_struct *iter_vma;
struct address_space *mapping;
struct prio_tree_iter iter;
pgoff_t pgoff;
/*
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
* from page cache lookup which is in HPAGE_SIZE units.
*/
address = address & huge_page_mask(h);
pgoff = vma_hugecache_offset(h, vma, address);
mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
/*
* Take the mapping lock for the duration of the table walk. As
* this mapping should be shared between all the VMAs,
* __unmap_hugepage_range() is called as the lock is already held.
*/
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
continue;
/*
* Unmap the page from other VMAs without their own reserves.
* They get marked to be SIGKILLed if they fault in these
* areas. This is because a future no-page fault on this VMA
* could insert a zeroed page instead of the data existing
* from the time of fork. This would look like data corruption.
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
__unmap_hugepage_range(iter_vma,
address, address + huge_page_size(h),
page);
}
mutex_unlock(&mapping->i_mmap_mutex);
return 1;
}
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
* cannot race with other handlers or page migration.
* Keep the pte_same checks anyway to make transition from the mutex easier.
*/
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
struct page *pagecache_page)
{
struct hstate *h = hstate_vma(vma);
struct page *old_page, *new_page;
int avoidcopy;
int outside_reserve = 0;
old_page = pte_page(pte);
retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
avoidcopy = (page_mapcount(old_page) == 1);
if (avoidcopy) {
if (PageAnon(old_page))
page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
/*
* If the process that created a MAP_PRIVATE mapping is about to
* perform a COW due to a shared page count, attempt to satisfy
* the allocation without using the existing reserves. The pagecache
* page is used to determine if the reserve at this address was
* consumed or not. If reserves were used, a partial faulted mapping
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
if (!(vma->vm_flags & VM_MAYSHARE) &&
is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
page_cache_get(old_page);
/* Drop page_table_lock as buddy allocator may be called */
spin_unlock(&mm->page_table_lock);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
page_cache_release(old_page);
/*
* If a process owning a MAP_PRIVATE mapping fails to COW,
* it is due to references held by a child and an insufficient
* huge page pool. To guarantee the original mapper's
* reliability, unmap the page from child processes. The child
* may get SIGKILLed if it later faults.
*/
if (outside_reserve) {
BUG_ON(huge_pte_none(pte));
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(huge_pte_none(pte));
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte)))
goto retry_avoidcopy;
/*
* A race occurred while re-acquiring page_table_lock, and
* our job is done.
*/
return 0;
}
WARN_ON_ONCE(1);
}
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
return -PTR_ERR(new_page);
}
/*
* When the original hugepage is a shared one, it does not have
* anon_vma prepared.
*/
if (unlikely(anon_vma_prepare(vma))) {
page_cache_release(new_page);
page_cache_release(old_page);
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
}
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
/*
* Retake the page_table_lock to check for racing updates
* before the page tables are altered
*/
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
mmu_notifier_invalidate_range_start(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
}
page_cache_release(new_page);
page_cache_release(old_page);
return 0;
}
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
return find_lock_page(mapping, idx);
}
/*
* Return whether there is a pagecache page to back given address within VMA.
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
*/
static bool hugetlbfs_pagecache_present(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
struct page *page;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
page = find_get_page(mapping, idx);
if (page)
put_page(page);
return page != NULL;
}
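/*
* Handle a fault on a not-present huge PTE: find or allocate the
* backing page, add it to the page cache (shared) or the anon rmap
* (private) and install the new PTE under page_table_lock.
*/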
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
int anon_rmap = 0;
pgoff_t idx;
unsigned long size;
struct page *page;
struct address_space *mapping;
pte_t new_pte;
/*
* Currently, we are forced to kill the process in the event the
* original mapper has unmapped pages from the child due to a failed
* COW. Warn that such a situation has occurred as it may not be obvious.
*/
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
printk(KERN_WARNING
"PID %d killed due to inadequate hugepage pool\n",
current->pid);
return ret;
}
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
/*
* Use page lock to guard against racing truncation
* before we get page_table_lock.
*/
retry:
page = find_lock_page(mapping, idx);
if (!page) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto out;
page = alloc_huge_page(vma, address, 0);
if (IS_ERR(page)) {
ret = -PTR_ERR(page);
goto out;
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
if (vma->vm_flags & VM_MAYSHARE) {
int err;
struct inode *inode = mapping->host;
err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
if (err) {
put_page(page);
if (err == -EEXIST)
goto retry;
goto out;
}
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
anon_rmap = 1;
}
} else {
/*
* If a memory error occurs between mmap() and fault, some processes
* don't have a hwpoisoned swap entry for the errored virtual address.
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
ret = VM_FAULT_HWPOISON |
VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked;
}
}
/*
* If we are going to COW a private mapping later, we examine the
* pending reservations for this page now. This will ensure that
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
spin_lock(&mm->page_table_lock);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto backout;
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
if (anon_rmap)
hugepage_add_new_anon_rmap(page, vma, address);
else
page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
}
spin_unlock(&mm->page_table_lock);
unlock_page(page);
out:
return ret;
backout:
spin_unlock(&mm->page_table_lock);
backout_unlocked:
unlock_page(page);
put_page(page);
goto out;
}
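/*
* Top-level hugetlb fault handler. Serializes instantiation with
* hugetlb_instantiation_mutex, sending missing pages to
* hugetlb_no_page() and write faults on read-only PTEs to
* hugetlb_cow().
*/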
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pte_t *ptep;
pte_t entry;
int ret;
struct page *page = NULL;
struct page *pagecache_page = NULL;
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait(mm, (pmd_t *)ptep, address);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(h - hstates);
}
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
mutex_lock(&hugetlb_instantiation_mutex);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, flags);
goto out_mutex;
}
ret = 0;
/*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
* spinlock. For private mappings, we also lookup the pagecache
* page now as it is used to determine if a reservation has been
* consumed.
*/
if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
}
if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
vma, address);
}
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need to take the former one
* when page != pagecache_page or !pagecache_page.
* Note that locking order is always pagecache_page -> page,
* so no worry about deadlock.
*/
page = pte_page(entry);
get_page(page);
if (page != pagecache_page)
lock_page(page);
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
goto out_page_table_lock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page);
goto out_page_table_lock;
}
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
out_page_table_lock:
spin_unlock(&mm->page_table_lock);
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
}
if (page != pagecache_page)
unlock_page(page);
put_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
return ret;
}
/* Can be overriden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int write)
{
BUG();
return NULL;
}
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,
unsigned int flags)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
int remainder = *length;
struct hstate *h = hstate_vma(vma);
spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
int absent;
struct page *page;
/*
* Some archs (sparc64, sh*) have multiple pte_ts to
* each hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
absent = !pte || huge_pte_none(huge_ptep_get(pte));
/*
* When coredumping, it suits get_dump_page if we just return
* an error where there's an empty slot with no huge pagecache
* to back it. This way, we avoid allocating a hugepage, and
* the sparse dumpfile avoids allocating disk blocks, but its
* huge holes still show up with zeroes where they need to be.
*/
if (absent && (flags & FOLL_DUMP) &&
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
remainder = 0;
break;
}
if (absent ||
((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
int ret;
spin_unlock(&mm->page_table_lock);
ret = hugetlb_fault(mm, vma, vaddr,
(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
spin_lock(&mm->page_table_lock);
if (!(ret & VM_FAULT_ERROR))
continue;
remainder = 0;
break;
}
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
get_page(pages[i]);
}
if (vmas)
vmas[i] = vma;
vaddr += PAGE_SIZE;
++pfn_offset;
--remainder;
++i;
if (vaddr < vma->vm_end && remainder &&
pfn_offset < pages_per_huge_page(h)) {
/*
* We use pfn_offset to avoid touching the pageframes
* of this compound page.
*/
goto same_page;
}
}
spin_unlock(&mm->page_table_lock);
*length = remainder;
*position = vaddr;
return i ? i : -EFAULT;
}
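/*
 * Illustrative sketch (added comment, not from the original source): how
 * the sub-page indexing above behaves for a hypothetical 2MB hugepage
 * with 4KB base pages; all concrete numbers here are assumptions.
 *
 *	vaddr      = 0x60211000
 *	pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT
 *	           = 0x11000 >> 12 = 17
 *
 * The same_page loop then hands out base pages 17, 18, ... of the
 * compound page via mem_map_offset() without re-walking the page tables,
 * until pfn_offset reaches pages_per_huge_page(h) (512 in this example).
 */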
void hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long start = address;
pte_t *ptep;
pte_t pte;
struct hstate *h = hstate_vma(vma);
BUG_ON(address >= end);
flush_cache_range(vma, address, end);
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
spin_lock(&mm->page_table_lock);
for (; address < end; address += huge_page_size(h)) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
if (huge_pmd_unshare(mm, &address, ptep))
continue;
if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
set_huge_pte_at(mm, address, ptep, pte);
}
}
spin_unlock(&mm->page_table_lock);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
flush_tlb_range(vma, start, end);
}
int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
/*
* Shared mappings base their reservation on the number of pages that
* are already allocated on behalf of the file. Private mappings need
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
chg = to - from;
set_vma_resv_map(vma, resv_map);
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0) {
ret = chg;
goto out_err;
}
/* There must be enough pages in the subpool for the mapping */
if (hugepage_subpool_get_pages(spool, chg)) {
ret = -ENOSPC;
goto out_err;
}
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugepage_subpool_put_pages(spool, chg);
goto out_err;
}
/*
* Account for the reservations made. Shared mappings record regions
* that have reservations as they are shared by multiple VMAs.
* When the last VMA disappears, the region map says how much
* the reservation was and the page cache tells how much of
* the reservation was consumed. Private mappings are per-VMA and
* only the consumed reservations are tracked. When the VMA
* disappears, the original reservation is the VMA size and the
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
out_err:
if (vma)
	resv_map_put(vma);
return ret;
}
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
long chg = region_truncate(&inode->i_mapping->private_list, offset);
struct hugepage_subpool *spool = subpool_inode(inode);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
hugepage_subpool_put_pages(spool, (chg - freed));
hugetlb_acct_memory(h, -(chg - freed));
}
#ifdef CONFIG_MEMORY_FAILURE
/* Should be called in hugetlb_lock */
static int is_hugepage_on_freelist(struct page *hpage)
{
struct page *page;
struct page *tmp;
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);
list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
if (page == hpage)
return 1;
return 0;
}
/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
*/
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);
int ret = -EBUSY;
spin_lock(&hugetlb_lock);
if (is_hugepage_on_freelist(hpage)) {
list_del(&hpage->lru);
set_page_refcounted(hpage);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
ret = 0;
}
spin_unlock(&hugetlb_lock);
return ret;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3656_0 |
crossvul-cpp_data_good_2262_2 | /*
* linux/fs/isofs/rock.c
*
* (C) 1992, 1993 Eric Youngdale
*
* Rock Ridge Extensions to iso9660
*/
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "isofs.h"
#include "rock.h"
/*
* These functions are designed to read the system areas of a directory record
* and extract relevant information. There are different functions provided
* depending upon what information we need at the time. One function fills
* out an inode structure, a second one extracts a filename, a third one
* returns a symbolic link name, and a fourth one returns the extent number
* for the file.
*/
#define SIG(A,B) ((A) | ((B) << 8)) /* isonum_721() */
struct rock_state {
void *buffer;
unsigned char *chr;
int len;
int cont_size;
int cont_extent;
int cont_offset;
struct inode *inode;
};
/*
* This is a way of ensuring that we have something in the system
* use fields that is compatible with Rock Ridge. Return zero on success.
*/
static int check_sp(struct rock_ridge *rr, struct inode *inode)
{
if (rr->u.SP.magic[0] != 0xbe)
return -1;
if (rr->u.SP.magic[1] != 0xef)
return -1;
ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip;
return 0;
}
static void setup_rock_ridge(struct iso_directory_record *de,
struct inode *inode, struct rock_state *rs)
{
rs->len = sizeof(struct iso_directory_record) + de->name_len[0];
if (rs->len & 1)
(rs->len)++;
rs->chr = (unsigned char *)de + rs->len;
rs->len = *((unsigned char *)de) - rs->len;
if (rs->len < 0)
rs->len = 0;
if (ISOFS_SB(inode->i_sb)->s_rock_offset != -1) {
rs->len -= ISOFS_SB(inode->i_sb)->s_rock_offset;
rs->chr += ISOFS_SB(inode->i_sb)->s_rock_offset;
if (rs->len < 0)
rs->len = 0;
}
}
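/*
 * Worked example (added for illustration, assuming the usual 33-byte
 * directory record header, i.e. sizeof(struct iso_directory_record) == 33):
 *
 *	de->name_len[0] == 1  ->  rs->len = 33 + 1 = 34 (already even)
 *	rs->chr = (unsigned char *)de + 34;	start of the system-use area
 *	rs->len = *((unsigned char *)de) - 34;	SUSP bytes remaining
 *
 * If an SP entry recorded a skip value earlier, s_rock_offset more bytes
 * are then skipped so that rs->chr lands on the first Rock Ridge field.
 */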
static void init_rock_state(struct rock_state *rs, struct inode *inode)
{
memset(rs, 0, sizeof(*rs));
rs->inode = inode;
}
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
*/
static int rock_continue(struct rock_state *rs)
{
int ret = 1;
int blocksize = 1 << rs->inode->i_blkbits;
const int min_de_size = offsetof(struct rock_ridge, u);
kfree(rs->buffer);
rs->buffer = NULL;
if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
(unsigned)rs->cont_size > blocksize ||
(unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
printk(KERN_NOTICE "rock: corrupted directory entry. "
"extent=%d, offset=%d, size=%d\n",
rs->cont_extent, rs->cont_offset, rs->cont_size);
ret = -EIO;
goto out;
}
if (rs->cont_extent) {
struct buffer_head *bh;
rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
if (!rs->buffer) {
ret = -ENOMEM;
goto out;
}
ret = -EIO;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
rs->cont_size);
put_bh(bh);
rs->chr = rs->buffer;
rs->len = rs->cont_size;
rs->cont_extent = 0;
rs->cont_size = 0;
rs->cont_offset = 0;
return 0;
}
printk("Unable to read rock-ridge attributes\n");
}
out:
kfree(rs->buffer);
rs->buffer = NULL;
return ret;
}
/*
* We think there's a record of type `sig' at rs->chr. Parse the signature
* and make sure that there's really room for a record of that type.
*/
static int rock_check_overflow(struct rock_state *rs, int sig)
{
int len;
switch (sig) {
case SIG('S', 'P'):
len = sizeof(struct SU_SP_s);
break;
case SIG('C', 'E'):
len = sizeof(struct SU_CE_s);
break;
case SIG('E', 'R'):
len = sizeof(struct SU_ER_s);
break;
case SIG('R', 'R'):
len = sizeof(struct RR_RR_s);
break;
case SIG('P', 'X'):
len = sizeof(struct RR_PX_s);
break;
case SIG('P', 'N'):
len = sizeof(struct RR_PN_s);
break;
case SIG('S', 'L'):
len = sizeof(struct RR_SL_s);
break;
case SIG('N', 'M'):
len = sizeof(struct RR_NM_s);
break;
case SIG('C', 'L'):
len = sizeof(struct RR_CL_s);
break;
case SIG('P', 'L'):
len = sizeof(struct RR_PL_s);
break;
case SIG('T', 'F'):
len = sizeof(struct RR_TF_s);
break;
case SIG('Z', 'F'):
len = sizeof(struct RR_ZF_s);
break;
default:
len = 0;
break;
}
len += offsetof(struct rock_ridge, u);
if (len > rs->len) {
printk(KERN_NOTICE "rock: directory entry would overflow "
"storage\n");
printk(KERN_NOTICE "rock: sig=0x%02x, size=%d, remaining=%d\n",
sig, len, rs->len);
return -EIO;
}
return 0;
}
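/*
 * Example (added for illustration): SIG() packs two signature characters
 * exactly the way isonum_721() decodes the little-endian 16-bit value
 * read from the media, so a record whose first two bytes are 'S','P'
 * compares equal to:
 *
 *	SIG('S', 'P') == 'S' | ('P' << 8) == 0x53 | 0x5000 == 0x5053
 *
 * which is why the parsers below can switch directly on
 * isonum_721(rs.chr) with no byte-swapping.
 */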
/*
* return length of name field; 0: not found, -1: to be ignored
*/
int get_rock_ridge_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
struct rock_state rs;
struct rock_ridge *rr;
int sig;
int retnamlen = 0;
int truncate = 0;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
*retname = 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_NM) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('N', 'M'):
if (truncate)
break;
if (rr->len < 5)
break;
/*
* If the flags are 2 or 4, this indicates '.' or '..'.
* We don't want to do anything with this, because it
* screws up the code that calls us. We don't really
* care anyways, since we can just use the non-RR
* name.
*/
if (rr->u.NM.flags & 6)
break;
if (rr->u.NM.flags & ~1) {
printk("Unsupported NM flag settings (%d)\n",
rr->u.NM.flags);
break;
}
if ((strlen(retname) + rr->len - 5) >= 254) {
truncate = 1;
break;
}
strncat(retname, rr->u.NM.name, rr->len - 5);
retnamlen += rr->len - 5;
break;
case SIG('R', 'E'):
kfree(rs.buffer);
return -1;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
return retnamlen; /* If 0, this file did not have a NM field */
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
#define RR_REGARD_XA 1
#define RR_RELOC_DE 2
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
struct inode *inode, int flags)
{
int symlink_len = 0;
int cnt, sig;
unsigned int reloc_block;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
struct rock_state rs;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
if (flags & RR_REGARD_XA) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.len = 0;
}
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] &
(RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
goto out;
break;
#endif
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
printk("%c", rr->u.ER.data[p]);
}
printk("\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
set_nlink(inode, isonum_733(rr->u.PX.n_links));
i_uid_write(inode, isonum_733(rr->u.PX.uid));
i_gid_write(inode, isonum_733(rr->u.PX.gid));
break;
case SIG('P', 'N'):
{
int high, low;
high = isonum_733(rr->u.PN.dev_high);
low = isonum_733(rr->u.PN.dev_low);
/*
* The Rock Ridge standard specifies that if
* sizeof(dev_t) <= 4, then the high field is
* unused, and the device number is completely
* stored in the low field. Some writers may
* ignore this subtlety, so we test whether the
* entire device number is stored in the low
* field, and if so use that.
*/
if ((low & ~0xff) && high == 0) {
inode->i_rdev =
MKDEV(low >> 8, low & 0xff);
} else {
inode->i_rdev =
MKDEV(high, low);
}
}
break;
case SIG('T', 'F'):
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
* either case.
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_mtime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ACCESS) {
inode->i_atime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
break;
case SIG('S', 'L'):
{
int slen;
struct SL_component *slp;
struct SL_component *oldslp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
inode->i_size = symlink_len;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
inode->i_size +=
slp->len;
break;
case 2:
inode->i_size += 1;
break;
case 4:
inode->i_size += 2;
break;
case 8:
rootflag = 1;
inode->i_size += 1;
break;
default:
printk("Symlink component flag "
"not implemented\n");
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)
(((char *)slp) + slp->len + 2);
if (slen < 2) {
if ((rr->u.SL.flags & 1) != 0 &&
    (oldslp->flags & 1) == 0)
	inode->i_size += 1;
break;
}
/*
* If this component record isn't
* continued, then append a '/'.
*/
if (!rootflag
&& (oldslp->flags & 1) == 0)
inode->i_size += 1;
}
}
symlink_len = inode->i_size;
break;
case SIG('R', 'E'):
printk(KERN_WARNING "Attempt to read inode for "
"relocated directory\n");
goto out;
case SIG('C', 'L'):
if (flags & RR_RELOC_DE) {
printk(KERN_ERR
"ISOFS: Recursive directory relocation "
"is not supported\n");
goto eio;
}
reloc_block = isonum_733(rr->u.CL.location);
if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
ISOFS_I(inode)->i_iget5_offset == 0) {
printk(KERN_ERR
"ISOFS: Directory relocation points to "
"itself\n");
goto eio;
}
ISOFS_I(inode)->i_first_extent = reloc_block;
reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
}
inode->i_mode = reloc->i_mode;
set_nlink(inode, reloc->i_nlink);
inode->i_uid = reloc->i_uid;
inode->i_gid = reloc->i_gid;
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
inode->i_ctime = reloc->i_ctime;
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
case SIG('Z', 'F'): {
int algo;
if (ISOFS_SB(inode->i_sb)->s_nocompress)
break;
algo = isonum_721(rr->u.ZF.algorithm);
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
block_shift);
} else {
/*
* Note: we don't change
* i_blocks here
*/
ISOFS_I(inode)->i_file_format =
isofs_file_compressed;
/*
* Parameters to compression
* algorithm (header size,
* block size)
*/
ISOFS_I(inode)->i_format_parm[0] =
isonum_711(&rr->u.ZF.parms[0]);
ISOFS_I(inode)->i_format_parm[1] =
isonum_711(&rr->u.ZF.parms[1]);
inode->i_size = isonum_733(rr->u.ZF.real_size);
}
} else {
printk(KERN_WARNING
"isofs: Unknown ZF compression "
"algorithm: %c%c\n",
rr->u.ZF.algorithm[0],
rr->u.ZF.algorithm[1]);
}
break;
}
#endif
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
ret = 0;
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
{
int slen;
int rootflag;
struct SL_component *oldslp;
struct SL_component *slp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
if (slp->len > plimit - rpnt)
return NULL;
memcpy(rpnt, slp->text, slp->len);
rpnt += slp->len;
break;
case 2:
if (rpnt >= plimit)
return NULL;
*rpnt++ = '.';
break;
case 4:
if (2 > plimit - rpnt)
return NULL;
*rpnt++ = '.';
*rpnt++ = '.';
break;
case 8:
if (rpnt >= plimit)
return NULL;
rootflag = 1;
*rpnt++ = '/';
break;
default:
printk("Symlink component flag not implemented (%d)\n",
slp->flags);
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)((char *)slp + slp->len + 2);
if (slen < 2) {
/*
* If there is another SL record, and this component
* record isn't continued, then add a slash.
*/
if ((!rootflag) && (rr->u.SL.flags & 1) &&
!(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
break;
}
/*
* If this component record isn't continued, then append a '/'.
*/
if (!rootflag && !(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
}
return rpnt;
}
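/*
 * Illustrative walk-through (added comment): given two SL components with
 * flags 0 and texts "usr" and "local", the loop above copies "usr",
 * appends '/' (no CONTINUE bit set on the component), then copies
 * "local", producing "usr/local". Component flag values 2, 4 and 8
 * instead emit ".", ".." and a leading "/", matching the cases in the
 * switch.
 */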
int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
int relocated)
{
int flags = relocated ? RR_RELOC_DE : 0;
int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* If the Rock Ridge flag was reset and we didn't look for attributes
* behind any XA attributes, have a look there.
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode,
flags | RR_REGARD_XA);
}
return result;
}
/*
* readpage() for symlinks: reads symlink contents into the page and either
* makes it uptodate and returns 0 or returns error (-EIO)
*/
static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
char *link = kmap(page);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
char *rpnt = link;
unsigned char *pnt;
struct iso_directory_record *raw_de;
unsigned long block, offset;
int sig;
struct rock_ridge *rr;
struct rock_state rs;
int ret;
if (!sbi->s_rock)
goto error;
init_rock_state(&rs, inode);
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
offset = ei->i_iget5_offset;
pnt = (unsigned char *)bh->b_data + offset;
raw_de = (struct iso_directory_record *)pnt;
/*
* If we go past the end of the buffer, there is some sort of error.
*/
if (offset + *pnt > bufsize)
goto out_bad_span;
/*
* Now test for possible Rock Ridge extensions which will override
* some of these numbers in the inode structure.
*/
setup_rock_ridge(raw_de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto out;
rs.chr += rr->len;
rs.len -= rr->len;
if (rs.len < 0)
goto out; /* corrupted isofs */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_SL) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('S', 'L'):
rpnt = get_symlink_chunk(rpnt, rr,
link + (PAGE_SIZE - 1));
if (rpnt == NULL)
goto out;
break;
case SIG('C', 'E'):
/* This tells us if there is a continuation record */
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret < 0)
goto fail;
if (rpnt == link)
goto fail;
brelse(bh);
*rpnt = '\0';
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
/* error exit from macro */
out:
kfree(rs.buffer);
goto fail;
out_noread:
printk("unable to read i-node block");
goto fail;
out_bad_span:
printk("symlink spans iso9660 blocks\n");
fail:
brelse(bh);
error:
SetPageError(page);
kunmap(page);
unlock_page(page);
return -EIO;
}
const struct address_space_operations isofs_symlink_aops = {
.readpage = rock_ridge_symlink_readpage
};
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_2262_2 |
crossvul-cpp_data_good_5785_0 | /*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_psci.h>
#ifdef REQUIRES_VIRT
__asm__(".arch_extension virt");
#endif
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
static unsigned long hyp_default_vectors;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
/* The VMID used in the VTTBR */
static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
static u8 kvm_next_vmid;
static DEFINE_SPINLOCK(kvm_vmid_lock);
static bool vgic_present;
static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
{
BUG_ON(preemptible());
__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
}
/**
* kvm_arm_get_running_vcpu - get the vcpu running on the current CPU.
* Must be called from non-preemptible context
*/
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
BUG_ON(preemptible());
return __get_cpu_var(kvm_arm_running_vcpu);
}
/**
* kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
*/
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void)
{
return &kvm_arm_running_vcpu;
}
int kvm_arch_hardware_enable(void *garbage)
{
return 0;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
void kvm_arch_hardware_disable(void *garbage)
{
}
int kvm_arch_hardware_setup(void)
{
return 0;
}
void kvm_arch_hardware_unsetup(void)
{
}
void kvm_arch_check_processor_compat(void *rtn)
{
*(int *)rtn = 0;
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
/**
* kvm_arch_init_vm - initializes a VM data structure
* @kvm: pointer to the KVM struct
*/
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
int ret = 0;
if (type)
return -EINVAL;
ret = kvm_alloc_stage2_pgd(kvm);
if (ret)
goto out_fail_alloc;
ret = create_hyp_mappings(kvm, kvm + 1);
if (ret)
goto out_free_stage2_pgd;
/* Mark the initial VMID generation invalid */
kvm->arch.vmid_gen = 0;
return ret;
out_free_stage2_pgd:
kvm_free_stage2_pgd(kvm);
out_fail_alloc:
return ret;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
return 0;
}
/**
* kvm_arch_destroy_vm - destroy the VM data structure
* @kvm: pointer to the KVM struct
*/
void kvm_arch_destroy_vm(struct kvm *kvm)
{
int i;
kvm_free_stage2_pgd(kvm);
for (i = 0; i < KVM_MAX_VCPUS; ++i) {
if (kvm->vcpus[i]) {
kvm_arch_vcpu_free(kvm->vcpus[i]);
kvm->vcpus[i] = NULL;
}
}
}
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
r = vgic_present;
break;
case KVM_CAP_USER_MEMORY:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_ONE_REG:
case KVM_CAP_ARM_PSCI:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
default:
r = kvm_arch_dev_ioctl_check_extension(ext);
break;
}
return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
return -EINVAL;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
enum kvm_mr_change change)
{
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
const struct kvm_memory_slot *old,
enum kvm_mr_change change)
{
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
int err;
struct kvm_vcpu *vcpu;
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu) {
err = -ENOMEM;
goto out;
}
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
goto free_vcpu;
err = create_hyp_mappings(vcpu, vcpu + 1);
if (err)
goto vcpu_uninit;
return vcpu;
vcpu_uninit:
kvm_vcpu_uninit(vcpu);
free_vcpu:
kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
return ERR_PTR(err);
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
kvm_mmu_free_memory_caches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return 0;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
int ret;
/* Force users to call KVM_ARM_VCPU_INIT */
vcpu->arch.target = -1;
/* Set up VGIC */
ret = kvm_vgic_vcpu_init(vcpu);
if (ret)
return ret;
/* Set up the timer */
kvm_timer_vcpu_init(vcpu);
return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
/*
* Check whether this vcpu requires the cache to be flushed on
* this physical CPU. This is a consequence of doing dcache
* operations by set/way on this vcpu. We do it here to be in
* a non-preemptible section.
*/
if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
kvm_arm_set_running_vcpu(vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_arm_set_running_vcpu(NULL);
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -EINVAL;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
return -EINVAL;
}
/**
* kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
* @v: The VCPU pointer
*
* If the guest CPU is not waiting for interrupts or an interrupt line is
* asserted, the CPU is by definition runnable.
*/
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v);
}
/* Just ensure a guest exit from a particular CPU */
static void exit_vm_noop(void *info)
{
}
void force_vm_exit(const cpumask_t *mask)
{
smp_call_function_many(mask, exit_vm_noop, NULL, true);
}
/**
* need_new_vmid_gen - check that the VMID is still valid
* @kvm: The VM's VMID to check
*
* return true if there is a new generation of VMIDs being used
*
* The hardware supports only 256 values with the value zero reserved for the
* host, so we check if an assigned value belongs to a previous generation,
* which requires us to assign a new value. If we're the first to use a
* VMID for the new generation, we must flush necessary caches and TLBs on all
* CPUs.
*/
static bool need_new_vmid_gen(struct kvm *kvm)
{
return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}
/**
* update_vttbr - Update the VTTBR with a valid VMID before the guest runs
* @kvm: The guest that we are about to run
*
* Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the
* VM has a valid VMID, otherwise assigns a new one and flushes corresponding
* caches and TLBs.
*/
static void update_vttbr(struct kvm *kvm)
{
phys_addr_t pgd_phys;
u64 vmid;
if (!need_new_vmid_gen(kvm))
return;
spin_lock(&kvm_vmid_lock);
/*
* We need to re-check the vmid_gen here to ensure that if another vcpu
* already allocated a valid vmid for this vm, then this vcpu should
* use the same vmid.
*/
if (!need_new_vmid_gen(kvm)) {
spin_unlock(&kvm_vmid_lock);
return;
}
/* First user of a new VMID generation? */
if (unlikely(kvm_next_vmid == 0)) {
atomic64_inc(&kvm_vmid_gen);
kvm_next_vmid = 1;
/*
* On SMP we know no other CPUs can use this CPU's or each
* other's VMID after force_vm_exit returns since the
* kvm_vmid_lock blocks them from reentry to the guest.
*/
force_vm_exit(cpu_all_mask);
/*
* Now broadcast TLB + ICACHE invalidation over the inner
* shareable domain to make sure all data structures are
* clean.
*/
kvm_call_hyp(__kvm_flush_vm_context);
}
kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
kvm->arch.vmid = kvm_next_vmid;
kvm_next_vmid++;
/* update vttbr to be used with the new vmid */
pgd_phys = virt_to_phys(kvm->arch.pgd);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
kvm->arch.vttbr |= vmid;
spin_unlock(&kvm_vmid_lock);
}
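/*
 * Sketch of the resulting VTTBR layout (added for illustration; the
 * exact positions come from VTTBR_VMID_SHIFT and VTTBR_BADDR_MASK, with
 * the 8-bit VMID assumed at bits [55:48] here):
 *
 *	63        56 55     48 47                               0
 *	+-----------+---------+---------------------------------+
 *	|   (res)   |  VMID   |  stage-2 pgd base (pgd_phys)    |
 *	+-----------+---------+---------------------------------+
 *
 * Guest TLB entries are tagged with the VMID, so entries belonging to
 * different VMs never alias even without a flush on every switch.
 */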
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
if (likely(vcpu->arch.has_run_once))
return 0;
vcpu->arch.has_run_once = true;
/*
* Initialize the VGIC before running a vcpu the first time on
* this VM.
*/
if (irqchip_in_kernel(vcpu->kvm) &&
unlikely(!vgic_initialized(vcpu->kvm))) {
int ret = kvm_vgic_init(vcpu->kvm);
if (ret)
return ret;
}
/*
* Handle the "start in power-off" case by calling into the
* PSCI code.
*/
if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
kvm_psci_call(vcpu);
}
return 0;
}
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
wait_event_interruptible(*wq, !vcpu->arch.pause);
}
static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
{
return vcpu->arch.target >= 0;
}
/**
* kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
* @vcpu: The VCPU pointer
* @run: The kvm_run structure pointer used for userspace state exchange
*
* This function is called through the VCPU_RUN ioctl from user space. It
* will execute VM code in a loop until the time slice for the process is used
* or some emulation is needed from user space in which case the function will
* return with return value 0 and with the kvm_run structure filled in with the
* required data for the requested emulation.
*/
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
sigset_t sigsaved;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
ret = kvm_vcpu_first_run_init(vcpu);
if (ret)
return ret;
if (run->exit_reason == KVM_EXIT_MMIO) {
ret = kvm_handle_mmio_return(vcpu, vcpu->run);
if (ret)
return ret;
}
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
while (ret > 0) {
/*
* Check conditions before entering the guest
*/
cond_resched();
update_vttbr(vcpu->kvm);
if (vcpu->arch.pause)
vcpu_pause(vcpu);
kvm_vgic_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
local_irq_disable();
/*
* Re-check atomic conditions
*/
if (signal_pending(current)) {
ret = -EINTR;
run->exit_reason = KVM_EXIT_INTR;
}
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
local_irq_enable();
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
continue;
}
/**************************************************************
* Enter the guest
*/
trace_kvm_entry(*vcpu_pc(vcpu));
kvm_guest_enter();
vcpu->mode = IN_GUEST_MODE;
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->arch.last_pcpu = smp_processor_id();
kvm_guest_exit();
trace_kvm_exit(*vcpu_pc(vcpu));
/*
* We may have taken a host interrupt in HYP mode (ie
* while executing the guest). This interrupt is still
* pending, as we haven't serviced it yet!
*
* We're now back in SVC mode, with interrupts
* disabled. Enabling the interrupts now will have
* the effect of taking the interrupt again, in SVC
* mode this time.
*/
local_irq_enable();
/*
* Back from guest
*************************************************************/
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
ret = handle_exit(vcpu, run, ret);
}
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
return ret;
}
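/*
 * Hypothetical userspace counterpart (added sketch, not from this file):
 * a VMM drives the loop above roughly like this, with vcpu_fd a vcpu
 * file descriptor and run the mmap'ed struct kvm_run:
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *			break;			(signal or error)
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_MMIO:
 *			handle_mmio(run);	then re-enter KVM_RUN
 *			break;
 *		}
 *	}
 *
 * handle_mmio() is a made-up VMM helper. A pending KVM_EXIT_MMIO is
 * completed on re-entry by the kvm_handle_mmio_return() call near the
 * top of this function.
 */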
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
int bit_index;
bool set;
unsigned long *ptr;
if (number == KVM_ARM_IRQ_CPU_IRQ)
bit_index = __ffs(HCR_VI);
else /* KVM_ARM_IRQ_CPU_FIQ */
bit_index = __ffs(HCR_VF);
ptr = (unsigned long *)&vcpu->arch.irq_lines;
if (level)
set = test_and_set_bit(bit_index, ptr);
else
set = test_and_clear_bit(bit_index, ptr);
/*
* If we didn't change anything, no need to wake up or kick other CPUs
*/
if (set == level)
return 0;
/*
* The vcpu irq_lines field was updated, wake up sleeping VCPUs and
* trigger a world-switch round on the running physical CPU to set the
* virtual IRQ/FIQ fields in the HCR appropriately.
*/
kvm_vcpu_kick(vcpu);
return 0;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
bool line_status)
{
u32 irq = irq_level->irq;
unsigned int irq_type, vcpu_idx, irq_num;
int nrcpus = atomic_read(&kvm->online_vcpus);
struct kvm_vcpu *vcpu = NULL;
bool level = irq_level->level;
irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
switch (irq_type) {
case KVM_ARM_IRQ_TYPE_CPU:
if (irqchip_in_kernel(kvm))
return -ENXIO;
if (vcpu_idx >= nrcpus)
return -EINVAL;
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (!vcpu)
return -EINVAL;
if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
return -EINVAL;
return vcpu_interrupt_line(vcpu, irq_num, level);
case KVM_ARM_IRQ_TYPE_PPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
if (vcpu_idx >= nrcpus)
return -EINVAL;
vcpu = kvm_get_vcpu(kvm, vcpu_idx);
if (!vcpu)
return -EINVAL;
if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
return -EINVAL;
return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
case KVM_ARM_IRQ_TYPE_SPI:
if (!irqchip_in_kernel(kvm))
return -ENXIO;
if (irq_num < VGIC_NR_PRIVATE_IRQS ||
irq_num > KVM_ARM_IRQ_GIC_MAX)
return -EINVAL;
return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
}
return -EINVAL;
}
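/*
 * Example encoding (added for illustration; the shift and mask constants
 * are the uapi ones used in the decoding above): to raise the IRQ line
 * of vcpu 2, userspace would pass
 *
 *	irq_level.irq   = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
 *			  (2 << KVM_ARM_IRQ_VCPU_SHIFT) |
 *			  (KVM_ARM_IRQ_CPU_IRQ << KVM_ARM_IRQ_NUM_SHIFT);
 *	irq_level.level = 1;
 *
 * which this function splits back into irq_type, vcpu_idx and irq_num.
 */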
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
switch (ioctl) {
case KVM_ARM_VCPU_INIT: {
struct kvm_vcpu_init init;
if (copy_from_user(&init, argp, sizeof(init)))
return -EFAULT;
return kvm_vcpu_set_target(vcpu, &init);
}
case KVM_SET_ONE_REG:
case KVM_GET_ONE_REG: {
struct kvm_one_reg reg;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
if (copy_from_user(&reg, argp, sizeof(reg)))
return -EFAULT;
if (ioctl == KVM_SET_ONE_REG)
return kvm_arm_set_reg(vcpu, &reg);
else
return kvm_arm_get_reg(vcpu, &reg);
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
struct kvm_reg_list reg_list;
unsigned n;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
reg_list.n = kvm_arm_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
}
default:
return -EINVAL;
}
}
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
return -EINVAL;
}
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
struct kvm_arm_device_addr *dev_addr)
{
unsigned long dev_id, type;
dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
KVM_ARM_DEVICE_ID_SHIFT;
type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
KVM_ARM_DEVICE_TYPE_SHIFT;
switch (dev_id) {
case KVM_ARM_DEVICE_VGIC_V2:
if (!vgic_present)
return -ENXIO;
return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
default:
return -ENODEV;
}
}
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
switch (ioctl) {
case KVM_CREATE_IRQCHIP: {
if (vgic_present)
return kvm_vgic_create(kvm);
else
return -ENXIO;
}
case KVM_ARM_SET_DEVICE_ADDR: {
struct kvm_arm_device_addr dev_addr;
if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
return -EFAULT;
return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
}
default:
return -EINVAL;
}
}
static void cpu_init_hyp_mode(void *dummy)
{
unsigned long long boot_pgd_ptr;
unsigned long long pgd_ptr;
unsigned long hyp_stack_ptr;
unsigned long stack_page;
unsigned long vector_ptr;
/* Switch from the HYP stub to our own HYP init vector */
__hyp_set_vectors(kvm_get_idmap_vector());
boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr();
pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
vector_ptr = (unsigned long)__kvm_hyp_vector;
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
}
static int hyp_init_cpu_notify(struct notifier_block *self,
unsigned long action, void *cpu)
{
switch (action) {
case CPU_STARTING:
case CPU_STARTING_FROZEN:
cpu_init_hyp_mode(NULL);
break;
}
return NOTIFY_OK;
}
static struct notifier_block hyp_init_cpu_nb = {
.notifier_call = hyp_init_cpu_notify,
};
/**
* Inits Hyp-mode on all online CPUs
*/
static int init_hyp_mode(void)
{
int cpu;
int err = 0;
/*
* Allocate Hyp PGD and setup Hyp identity mapping
*/
err = kvm_mmu_init();
if (err)
goto out_err;
/*
* It is probably enough to obtain the default on one
* CPU. It's unlikely to be different on the others.
*/
hyp_default_vectors = __hyp_get_vectors();
/*
* Allocate stack pages for Hypervisor-mode
*/
for_each_possible_cpu(cpu) {
unsigned long stack_page;
stack_page = __get_free_page(GFP_KERNEL);
if (!stack_page) {
err = -ENOMEM;
goto out_free_stack_pages;
}
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
}
/*
* Map the Hyp-code called directly from the host
*/
err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
if (err) {
kvm_err("Cannot map world-switch code\n");
goto out_free_mappings;
}
/*
* Map the Hyp stack pages
*/
for_each_possible_cpu(cpu) {
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
if (err) {
kvm_err("Cannot map hyp stack\n");
goto out_free_mappings;
}
}
/*
* Map the host CPU structures
*/
kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
if (!kvm_host_cpu_state) {
err = -ENOMEM;
kvm_err("Cannot allocate host CPU state\n");
goto out_free_mappings;
}
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
if (err) {
kvm_err("Cannot map host CPU state: %d\n", err);
goto out_free_context;
}
}
/*
* Execute the init code on each CPU.
*/
on_each_cpu(cpu_init_hyp_mode, NULL, 1);
/*
* Init HYP view of VGIC
*/
err = kvm_vgic_hyp_init();
if (err)
goto out_free_context;
#ifdef CONFIG_KVM_ARM_VGIC
vgic_present = true;
#endif
/*
* Init HYP architected timer support
*/
err = kvm_timer_hyp_init();
if (err)
goto out_free_mappings;
#ifndef CONFIG_HOTPLUG_CPU
free_boot_hyp_pgd();
#endif
kvm_perf_init();
kvm_info("Hyp mode initialized successfully\n");
return 0;
out_free_context:
free_percpu(kvm_host_cpu_state);
out_free_mappings:
free_hyp_pgds();
out_free_stack_pages:
for_each_possible_cpu(cpu)
free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
kvm_err("error initializing Hyp mode: %d\n", err);
return err;
}
static void check_kvm_target_cpu(void *ret)
{
*(int *)ret = kvm_target_cpu();
}
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
int kvm_arch_init(void *opaque)
{
int err;
int ret, cpu;
if (!is_hyp_mode_available()) {
kvm_err("HYP mode not available\n");
return -ENODEV;
}
for_each_online_cpu(cpu) {
smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
if (ret < 0) {
kvm_err("Error, CPU %d not supported!\n", cpu);
return -ENODEV;
}
}
err = init_hyp_mode();
if (err)
goto out_err;
err = register_cpu_notifier(&hyp_init_cpu_nb);
if (err) {
kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
goto out_err;
}
kvm_coproc_table_init();
return 0;
out_err:
return err;
}
/* NOP: Compiling as a module not supported */
void kvm_arch_exit(void)
{
kvm_perf_teardown();
}
static int arm_init(void)
{
int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
return rc;
}
module_init(arm_init);
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5785_0 |
crossvul-cpp_data_bad_3719_0 | /*
* tsacct.c - System accounting over taskstats interface
*
* Copyright (C) Jay Lan, <jlan@sgi.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/acct.h>
#include <linux/jiffies.h>
#define USEC_PER_TICK (USEC_PER_SEC/HZ)
/*
* fill in basic accounting fields
*/
void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
{
struct timespec uptime, ts;
s64 ac_etime;
BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
/* calculate task elapsed time in timespec */
do_posix_clock_monotonic_gettime(&uptime);
ts = timespec_sub(uptime, tsk->start_time);
/* rebase elapsed time to usec */
ac_etime = timespec_to_ns(&ts);
do_div(ac_etime, NSEC_PER_USEC);
stats->ac_etime = ac_etime;
stats->ac_btime = xtime.tv_sec - ts.tv_sec;
if (thread_group_leader(tsk)) {
stats->ac_exitcode = tsk->exit_code;
if (tsk->flags & PF_FORKNOEXEC)
stats->ac_flag |= AFORK;
}
if (tsk->flags & PF_SUPERPRIV)
stats->ac_flag |= ASU;
if (tsk->flags & PF_DUMPCORE)
stats->ac_flag |= ACORE;
if (tsk->flags & PF_SIGNALED)
stats->ac_flag |= AXSIG;
stats->ac_nice = task_nice(tsk);
stats->ac_sched = tsk->policy;
stats->ac_uid = tsk->uid;
stats->ac_gid = tsk->gid;
stats->ac_pid = tsk->pid;
rcu_read_lock();
stats->ac_ppid = pid_alive(tsk) ?
rcu_dereference(tsk->real_parent)->tgid : 0;
rcu_read_unlock();
stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
stats->ac_minflt = tsk->min_flt;
stats->ac_majflt = tsk->maj_flt;
strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
}
#ifdef CONFIG_TASK_XACCT
#define KB 1024
#define MB (1024*KB)
/*
* fill in extended accounting fields
*/
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
/* convert pages-jiffies to Mbyte-usec */
stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB;
stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB;
if (p->mm) {
/* adjust to KB unit */
stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB;
stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB;
}
stats->read_char = p->rchar;
stats->write_char = p->wchar;
stats->read_syscalls = p->syscr;
stats->write_syscalls = p->syscw;
}
#undef KB
#undef MB
/**
* acct_update_integrals - update mm integral fields in task_struct
* @tsk: task_struct for accounting
*/
void acct_update_integrals(struct task_struct *tsk)
{
if (likely(tsk->mm)) {
long delta = cputime_to_jiffies(
cputime_sub(tsk->stime, tsk->acct_stimexpd));
if (delta == 0)
return;
tsk->acct_stimexpd = tsk->stime;
tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
}
}
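/*
 * Worked example (added for illustration): if 10 jiffies of system time
 * have elapsed since acct_stimexpd and the task's RSS is 1000 pages, the
 * update above adds 10 * 1000 = 10000 page-jiffies to acct_rss_mem1.
 * xacct_add_tsk() later rebases such integrals to Mbyte-usec with
 * jiffies_to_usecs() * PAGE_SIZE / MB.
 */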
/**
* acct_clear_integrals - clear the mm integral fields in task_struct
* @tsk: task_struct whose accounting fields are cleared
*/
void acct_clear_integrals(struct task_struct *tsk)
{
tsk->acct_stimexpd = 0;
tsk->acct_rss_mem1 = 0;
tsk->acct_vm_mem1 = 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3719_0 |
crossvul-cpp_data_good_944_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO GGGGG RRRR IIIII FFFFF Y Y %
% MM MM O O G R R I F Y Y %
% M M M O O G GGG RRRR I FFF Y %
% M M O O G G R R I F Y %
% M M OOO GGGG R R IIIII F Y %
% %
% %
% MagickWand Module Methods %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use the mogrify program to resize an image, blur, crop, despeckle, dither,
% draw on, flip, join, re-sample, and much more. This tool is similar to
% convert except that the original image file is overwritten (unless you
% change the file suffix with the -format option) with any changes you
% request.
%
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/mogrify-private.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_HAVE_UTIME_H)
#include <utime.h>
#endif
/*
Constant declaration.
*/
static const char
MogrifyAlphaColor[] = "#bdbdbd", /* gray */
MogrifyBackgroundColor[] = "#ffffff", /* white */
MogrifyBorderColor[] = "#dfdfdf"; /* gray */
/*
Define declarations.
*/
#define UndefinedCompressionQuality 0UL
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o m m a n d G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickCommandGenesis() applies image processing options to an image as
% prescribed by command line options.
%
% It will look for special options like "-debug", "-bench", and
% "-distribute-cache" that need to be applied even before the main
% processing begins, and may completely overrule normal command processing.
% Such 'Genesis' options can only be given on the CLI (not in a script),
% and are typically ignored (as they have been handled) if seen later.
%
% The format of the MagickCommandGenesis method is:
%
% MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
% MagickCommand command,int argc,char **argv,char **metadata,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o command: Choose from ConvertImageCommand, IdentifyImageCommand,
% MogrifyImageCommand, CompositeImageCommand, CompareImagesCommand,
% ConjureImageCommand, StreamImageCommand, ImportImageCommand,
% DisplayImageCommand, or AnimateImageCommand.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
MagickCommand command,int argc,char **argv,char **metadata,
ExceptionInfo *exception)
{
char
client_name[MaxTextExtent],
*option;
double
duration,
serial;
MagickBooleanType
concurrent,
regard_warnings,
status;
register ssize_t
i;
size_t
iterations,
number_threads;
ssize_t
n;
(void) setlocale(LC_ALL,"");
(void) setlocale(LC_NUMERIC,"C");
GetPathComponent(argv[0],TailPath,client_name);
(void) SetClientName(client_name);
concurrent=MagickFalse;
duration=(-1.0);
iterations=1;
status=MagickTrue;
regard_warnings=MagickFalse;
for (i=1; i < (ssize_t) (argc-1); i++)
{
option=argv[i];
if ((strlen(option) == 1) || ((*option != '-') && (*option != '+')))
continue;
if (LocaleCompare("-bench",option) == 0)
iterations=StringToUnsignedLong(argv[++i]);
if (LocaleCompare("-concurrent",option) == 0)
concurrent=MagickTrue;
if (LocaleCompare("-debug",option) == 0)
(void) SetLogEventMask(argv[++i]);
if (LocaleCompare("-distribute-cache",option) == 0)
{
DistributePixelCacheServer(StringToInteger(argv[++i]),exception);
exit(0);
}
if (LocaleCompare("-duration",option) == 0)
duration=StringToDouble(argv[++i],(char **) NULL);
if (LocaleCompare("-regard-warnings",option) == 0)
regard_warnings=MagickTrue;
}
if (iterations == 1)
{
char
*text;
text=(char *) NULL;
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
return(status);
}
number_threads=GetOpenMPMaximumThreads();
serial=0.0;
for (n=1; n <= (ssize_t) number_threads; n++)
{
double
e,
parallel,
user_time;
TimerInfo
*timer;
(void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n);
timer=AcquireTimerInfo();
if (concurrent == MagickFalse)
{
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
else
{
SetOpenMPNested(1);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp parallel for shared(status)
#endif
for (i=0; i < (ssize_t) iterations; i++)
{
char
*text;
text=(char *) NULL;
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,&text,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_MagickCommandGenesis)
#endif
{
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if (text != (char *) NULL)
{
if (metadata != (char **) NULL)
(void) ConcatenateString(&(*metadata),text);
text=DestroyString(text);
}
}
}
}
user_time=GetUserTime(timer);
parallel=GetElapsedTime(timer);
e=1.0;
if (n == 1)
serial=parallel;
else
e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/
(double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n);
(void) FormatLocaleFile(stderr,
" Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n",
(double) n,(double) iterations,(double) iterations/parallel,e,user_time,
(unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel,
60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5));
timer=DestroyTimerInfo(timer);
}
return(status);
}
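/*
  Note on the benchmark loop above (comment added for clarity, not part of
  the original behavior): with 'serial' the single-thread elapsed time and
  'parallel' the elapsed time with n threads, the printed 'e' value
  resembles the Karp-Flatt style estimate of the serial fraction,

    e = (1/speedup - 1/n) / (1 - 1/n),

  where the speedup term is reconstructed from serial/(serial+parallel).
  An 'e' near 0 suggests near-linear scaling; an 'e' near 1 suggests a
  mostly serial workload.
*/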
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImage() applies simple single image processing options to a single
% image that may be part of a large list, and also handles any 'region'
% processing.
%
% The image in the list may be modified in three different ways...
%
% * directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
% * replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
% * replace by a list of images (only the -separate option!)
%
% In each case the result is returned into the list, and a pointer to the
% modified image (last image added if replaced by a list of images) is
% returned.
%
% ASIDE: The -crop is present but restricted to non-tile single image crops
%
% This means that if all the images are being processed (such as by
% MogrifyImages()), the next image to be processed will be as per the
% pointer (*image)->next. Also, the image list may grow as a result of
% some specific operations, but as images are never merged or deleted it
% will never shrink in length. Typically the list will remain the same
% length.
%
% WARNING: As the image pointed to may be replaced, the first image in the
% list may also change. GetFirstImageInList() should be used by the caller
% if they wish to return the Image pointer to the first image in the list.
%
%
% The format of the MogrifyImage method is:
%
% MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
% const char **argv,Image **image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
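% A minimal usage sketch (illustrative only; the option vector below is an
% assumed example, not taken from this file):
%
%   const char
%     *options[] = { "-resize", "50%" };
%
%   status=MogrifyImage(image_info,2,options,&image,exception);
%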
*/
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MagickPathExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
/*
Read an image into an image cache (for repeated use) if it is not already
cached, then return the cached image.
*/
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->filename,path,MagickPathExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
static inline MagickBooleanType IsPathWritable(const char *path)
{
if (IsPathAccessible(path) == MagickFalse)
return(MagickFalse);
if (access_utf8(path,W_OK) != 0)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MagickPathExtent],
tag[MagickPathExtent];
const char
*locale_message;
register char
*p;
magick_unreferenced(client_data);
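/*
  Throttle terminal updates: ignore degenerate extents and report only
  every 50th tick plus the final one.
*/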
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MagickPathExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
static Image *SparseColorOption(const Image *image,
const SparseColorMethod method,const char *arguments,
const MagickBooleanType color_from_image,ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
PixelInfo
color;
MagickBooleanType
error;
register size_t
x;
size_t
number_arguments,
number_colors;
/*
SparseColorOption() parses the complex -sparse-color argument into an
array of floating-point values, then calls SparseColorImage().  The
argument is a complex mix of floating-point pixel coordinates and color
specifications (or direct floating-point numbers).  The number of floats
needed to represent a color varies depending on the current channel
setting.
*/
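/*
For illustration (an assumed argument string, not taken from this file):
with the default RGB channels, "30,10 red  10,80 blue" supplies two
control points, each a pair of floating-point coordinates followed by a
color; each color expands to number_colors floats in sparse_arguments.
*/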
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Limit channels according to the image, and add up the number of color
channels.
*/
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
number_colors++;
/*
Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
if ( color_from_image ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color arg given, when colors are coming from image");
return( (Image *) NULL);
}
x += number_colors; /* color argument */
}
else {
x++; /* floating point argument */
}
}
error=MagickTrue;
if ( color_from_image ) {
/* just the control points are being given */
error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse;
number_arguments=(x/2)*(2+number_colors);
}
else {
/* control points and color values */
error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse;
number_arguments=x;
}
if ( error ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
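/*
Parse the arguments one control point at a time.  Priming token[0] with ','
before each GetNextToken() loop consumes any comma separators between
values.
*/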
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of X-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color values for this control point */
#if 0
if ( color_from_image ) {
/* get color from image */
/* HOW??? */
}
else
#endif
{
/* color name or function given in string argument */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryColorCompliance(token,AllCompliance,&color,exception);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.blue;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
sparse_arguments[x++] = QuantumScale*color.black;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
sparse_arguments[x++] = QuantumScale*color.alpha;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
(image->alpha_trait != UndefinedPixelTrait))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
}
if ( number_arguments != x && !error ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
" InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error");
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( (Image *) NULL);
}
if ( error )
return( (Image *) NULL);
/* Call the Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments,
exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
const char **argv,Image **image,ExceptionInfo *exception)
{
CompositeOperator
compose;
const char
*format,
*option;
double
attenuate;
DrawInfo
*draw_info;
GeometryInfo
geometry_info;
ImageInfo
*mogrify_info;
MagickStatusType
status;
PixelInfo
fill;
MagickStatusType
flags;
PixelInterpolateMethod
interpolate_method;
QuantizeInfo
*quantize_info;
RectangleInfo
geometry,
region_geometry;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (argc < 0)
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL);
quantize_info=AcquireQuantizeInfo(mogrify_info);
SetGeometryInfo(&geometry_info);
GetPixelInfo(*image,&fill);
fill=(*image)->background_color;
attenuate=1.0;
compose=(*image)->compose;
interpolate_method=UndefinedInterpolatePixel;
format=GetImageOption(mogrify_info,"format");
SetGeometry(*image,®ion_geometry);
/*
Transmogrify the image.
*/
for (i=0; i < (ssize_t) argc; i++)
{
Image
*mogrify_image;
ssize_t
count;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option),
0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
mogrify_image=(Image *) NULL;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
/*
Adaptive blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveBlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/*
Adaptive resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=AdaptiveResizeImage(*image,geometry.width,
geometry.height,exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
/*
Adaptive sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveSharpenImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
/*
Affine matrix.
*/
if (*option == '+')
{
GetAffineMatrix(&draw_info->affine);
break;
}
(void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
AlphaChannelOption
alpha_type;
(void) SyncImageSettings(mogrify_info,*image,exception);
alpha_type=(AlphaChannelOption) ParseCommandOption(
MagickAlphaChannelOptions,MagickFalse,argv[i+1]);
(void) SetImageAlphaChannel(*image,alpha_type,exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
*text,
geometry_str[MagickPathExtent];
/*
Annotate image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
text=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (text == (char *) NULL)
break;
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry_str,MagickPathExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&draw_info->geometry,geometry_str);
draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(*image,draw_info,exception);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
draw_info->stroke_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
draw_info->text_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
attenuate=1.0;
break;
}
attenuate=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
{
/*
Auto Adjust Gamma of image based on its mean
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) AutoGammaImage(*image,exception);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
/*
Perfectly Normalize (max/min stretch) the image
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) AutoLevelImage(*image,exception);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=AutoOrientImage(*image,(*image)->orientation,
exception);
break;
}
if (LocaleCompare("auto-threshold",option+1) == 0)
{
AutoThresholdMethod
method;
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(AutoThresholdMethod) ParseCommandOption(
MagickAutoThresholdOptions,MagickFalse,argv[i+1]);
(void) AutoThresholdImage(*image,method,exception);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
/*
Black threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) BlackThresholdImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
/*
Blue shift image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
geometry_info.rho=1.5;
if (*option == '-')
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
mogrify_image=BlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
/*
Surround image with a border of solid color.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=BorderImage(*image,&geometry,compose,exception);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&draw_info->border_color,exception);
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->border_color,exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->undercolor,exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
/*
Brightness / contrast image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImage(*image,brightness,contrast,
exception);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
/*
Detect edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
mogrify_image=CannyEdgeImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection;
/*
Color correct with a color decision list.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
color_correction_collection=FileToString(argv[i+1],~0UL,exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(*image,color_correction_collection,
exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ChannelType
channel;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetPixelChannelMask(*image,DefaultChannels);
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
(void) SetPixelChannelMask(*image,channel);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
/*
Charcoal image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
mogrify_image=CharcoalImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
/*
Chop the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ChopImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
/*
Contrast limited adaptive histogram equalization.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
(void) CLAHEImage(*image,geometry.width,geometry.height,
(size_t) geometry.x,geometry_info.psi,exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
(void) ClipImage(*image,exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
Image
*clip_mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
clip_mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (clip_mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
/*
Colorize the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=ColorizeImage(*image,argv[i+1],&fill,exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image,exception);
kernel=AcquireKernelInfo(argv[i+1],exception);
if (kernel == (KernelInfo *) NULL)
break;
/* FUTURE: check on size of the matrix */
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/*
Reduce the number of colors in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
quantize_info->number_colors=StringToUnsignedLong(argv[i+1]);
if (quantize_info->number_colors == 0)
break;
if (((*image)->storage_class == DirectClass) ||
(*image)->colors > quantize_info->number_colors)
(void) QuantizeImage(quantize_info,*image,exception);
else
(void) CompressImageColormap(*image,exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ColorspaceType
colorspace;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) TransformImageColorspace(*image,sRGBColorspace,
exception);
break;
}
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) TransformImageColorspace(*image,colorspace,exception);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=ConnectedComponentsImage(*image,(size_t)
StringToInteger(argv[i+1]),(CCObjectInfo **) NULL,exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ContrastImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
/*
Contrast stretch image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
white_point=(double) (*image)->columns*(*image)->rows-
white_point;
(void) ContrastStretchImage(*image,black_point,white_point,
exception);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
size_t
extent;
(void) SyncImageSettings(mogrify_info,*image,exception);
kernel_info=AcquireKernelInfo(argv[i+1],exception);
if (kernel_info == (KernelInfo *) NULL)
break;
extent=kernel_info->width*kernel_info->height;
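/*
Normalize the user-supplied kernel so its weights sum to 1.0 (unless the
sum is effectively zero), then apply it as a correlation.
*/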
gamma=0.0;
for (j=0; j < (ssize_t) extent; j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) extent; j++)
kernel_info->values[j]*=gamma;
mogrify_image=MorphologyImage(*image,CorrelateMorphology,1,
kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
/*
Crop an image to a smaller size.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=CropImageToTiles(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
/*
Cycle an image colormap.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]),
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Decipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyDecipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
(void) CloneString(&draw_info->density,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH,exception);
break;
}
(void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]),
exception);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
/*
Straighten the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
threshold=40.0*QuantumRange/100.0;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=DeskewImage(*image,threshold,exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
/*
Reduce the speckles within an image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=DespeckleImage(*image,exception);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&draw_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
char
*args,
token[MagickPathExtent];
const char
*p;
DistortMethod
method;
double
*arguments;
register ssize_t
x;
size_t
number_arguments;
/*
Distort image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(DistortMethod) ParseCommandOption(MagickDistortOptions,
MagickFalse,argv[i+1]);
if (method == ResizeDistortion)
{
double
resize_args[2];
/*
Special Case - Argument is actually a resize geometry!
Convert that to an appropriate distortion argument array.
*/
(void) ParseRegionGeometry(*image,argv[i+2],&geometry,
exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
mogrify_image=DistortImage(*image,method,(size_t)2,
resize_args,MagickTrue,exception);
break;
}
args=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (args == (char *) NULL)
break;
p=(char *) args;
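/* First pass: count the comma-separated numeric arguments. */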
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
mogrify_image=DistortImage(*image,method,number_arguments,arguments,
(*option == '+') ? MagickTrue : MagickFalse,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither_method=NoDitherMethod;
break;
}
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
/*
Draw image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) CloneString(&draw_info->primitive,argv[i+1]);
(void) DrawImage(*image,draw_info,exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
/*
Enhance edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=EdgeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
/*
Emboss image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EmbossImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Encipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&draw_info->encoding,argv[i+1]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
/*
Enhance image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=EnhanceImage(*image,exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
/*
Equalize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) EqualizeImage(*image,exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*image,exception);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+
1.0);
(void) EvaluateImage(*image,op,constant,exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
/*
Set the image extent.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (geometry.width == 0)
geometry.width=(*image)->columns;
if (geometry.height == 0)
geometry.height=(*image)->rows;
mogrify_image=ExtentImage(*image,&geometry,exception);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
break;
}
(void) CloneString(&draw_info->family,argv[i+1]);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:features");
break;
}
(void) SetImageArtifact(*image,"identify:features",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
ExceptionInfo
*sans;
PixelInfo
color;
GetPixelInfo(*image,&fill);
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,&fill,
exception);
draw_info->fill=fill;
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
sans=AcquireExceptionInfo();
status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
else
draw_info->fill=fill=color;
break;
}
if (LocaleCompare("flip",option+1) == 0)
{
/*
Flip image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=FlipImage(*image,exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
PixelInfo
target;
/*
Floodfill image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) QueryColorCompliance(argv[i+2],AllCompliance,&target,
exception);
(void) FloodfillPaintImage(*image,draw_info,&target,geometry.x,
geometry.y,*option == '-' ? MagickFalse : MagickTrue,exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
/*
Flop image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=FlopImage(*image,exception);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
break;
}
(void) CloneString(&draw_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
format=argv[i+1];
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
/*
Surround image with an ornamental border.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=(*image)->columns+2*frame_info.width;
frame_info.height=(*image)->rows+2*frame_info.height;
mogrify_image=FrameImage(*image,&frame_info,compose,exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
char
*arguments,
token[MagickPathExtent];
const char
*p;
double
*parameters;
MagickFunction
function;
register ssize_t
x;
size_t
number_parameters;
/*
Apply a function to modify image values.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
function=(MagickFunction) ParseCommandOption(MagickFunctionOptions,
MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (arguments == (char *) NULL)
break;
p=(char *) arguments;
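/* First pass: count the parameters; the second pass converts them. */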
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_parameters=(size_t) x;
parameters=(double *) AcquireQuantumMemory(number_parameters,
sizeof(*parameters));
if (parameters == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(parameters,0,number_parameters*
sizeof(*parameters));
p=(char *) arguments;
for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
parameters[x]=StringToDouble(token,(char **) NULL);
}
arguments=DestroyString(arguments);
(void) FunctionImage(*image,function,number_parameters,parameters,
exception);
parameters=(double *) RelinquishMagickMemory(parameters);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
/*
Gamma image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
(*image)->gamma=StringToDouble(argv[i+1],(char **) NULL);
else
(void) GammaImage(*image,StringToDouble(argv[i+1],(char **) NULL),
exception);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=GaussianBlurImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record the image offset if one is given, else resize the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
if ((*image)->geometry != (char *) NULL)
(*image)->geometry=DestroyString((*image)->geometry);
break;
}
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&(*image)->geometry,argv[i+1]);
else
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,exception);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
draw_info->gravity=UndefinedGravity;
break;
}
draw_info->gravity=(GravityType) ParseCommandOption(
MagickGravityOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
PixelIntensityMethod
method;
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,argv[i+1]);
(void) GrayscaleImage(*image,method,exception);
break;
}
break;
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]);
break;
}
if (LocaleCompare("hough-lines",option+1) == 0)
{
/*
Identify straight lines in the image (Hough line transform).
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
char
*text;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (format == (char *) NULL)
{
(void) IdentifyImage(*image,stdout,mogrify_info->verbose,
exception);
break;
}
text=InterpretImageProperties(mogrify_info,*image,format,
exception);
if (text == (char *) NULL)
break;
(void) fputs(text,stdout);
text=DestroyString(text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
/*
Implode image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=ImplodeImage(*image,geometry_info.rho,
interpolate_method,exception);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interline_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
interpolate_method=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interword_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/*
Interpolative resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=InterpolativeResizeImage(*image,geometry.width,
geometry.height,interpolate_method,exception);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->kerning=geometry_info.rho;
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
mogrify_image=KuwaharaImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
/*
Local adaptive threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=AdaptiveThresholdImage(*image,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,(double)
geometry_info.xi,exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
double
black_point,
gamma,
white_point;
/*
Parse levels.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(double) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (QuantumRange/100.0);
white_point*=(double) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(double) QuantumRange-black_point;
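/*
With '+level' (or the '!' geometry flag) apply the inverse mapping:
LevelizeImage() compresses the full quantum range into
[black_point,white_point].
*/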
if ((*option == '+') || ((flags & AspectValue) != 0))
(void) LevelizeImage(*image,black_point,white_point,gamma,
exception);
else
(void) LevelImage(*image,black_point,white_point,gamma,
exception);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
PixelInfo
black_point,
white_point;
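/*
Parse one or two colors: a single color sets both the black and white
points; two colors set them independently (defaulting to black and white
when a token is not a color).
*/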
p=(const char *) argv[i+1];
GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&black_point,exception);
else
(void) QueryColorCompliance("#000000",AllCompliance,
&black_point,exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&white_point,exception);
else
(void) QueryColorCompliance("#ffffff",AllCompliance,
&white_point,exception);
}
(void) LevelImageColors(*image,&black_point,&white_point,
*option == '+' ? MagickTrue : MagickFalse,exception);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(double) (*image)->columns*(*image)->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(double) (*image)->columns*(*image)->rows-
black_point;
(void) LinearStretchImage(*image,black_point,white_point,exception);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/*
Liquid rescale image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
mogrify_image=LiquidRescaleImage(*image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
mogrify_image=LocalContrastImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
/*
Double image size.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=MagnifyImage(*image,exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image,exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
(void) SetImageAlphaChannel(*image,(*option == '-') ?
SetAlphaChannel : DeactivateAlphaChannel,exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
/*
Mean-shift the image to reduce local color variation.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
/*
Median filter image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,MedianStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/*
Mode image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,ModeStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ModulateImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:moments");
break;
}
(void) SetImageArtifact(*image,"identify:moments",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageProgressMonitor(*image,
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
(void) SetImageProgressMonitor(*image,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) SetImageType(*image,BilevelType,exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
KernelInfo
*kernel;
MorphologyMethod
method;
ssize_t
iterations;
/*
Morphological Image Operation
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
p=argv[i+1];
GetNextToken(p,&p,MagickPathExtent,token);
method=(MorphologyMethod) ParseCommandOption(
MagickMorphologyOptions,MagickFalse,token);
iterations=1L;
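/*
An optional iteration count may follow the method, separated by ':' or
','; by ImageMagick convention a count of -1 iterates to convergence.
*/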
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p != '\0'))
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(argv[i+2],exception);
if (kernel == (KernelInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnabletoParseKernel","morphology");
status=MagickFalse;
break;
}
mogrify_image=MorphologyImage(*image,method,iterations,kernel,
exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("motion-blur",option+1) == 0)
{
/*
Motion blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=MotionBlurImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
break;
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) NegateImage(*image,*option == '+' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '-')
{
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImage(*image,NonpeakStatistic,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
}
else
{
NoiseType
noise;
noise=(NoiseType) ParseCommandOption(MagickNoiseOptions,
MagickFalse,argv[i+1]);
mogrify_image=AddNoiseImage(*image,noise,attenuate,exception);
}
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) NormalizeImage(*image,exception);
break;
}
break;
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
PixelInfo
target;
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) QueryColorCompliance(argv[i+1],AllCompliance,&target,
exception);
(void) OpaquePaintImage(*image,&target,&fill,*option == '-' ?
MagickFalse : MagickTrue,exception);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) OrderedDitherImage(*image,argv[i+1],exception);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=OilPaintImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
/*
Perceptible image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) PerceptibleImage(*image,StringToDouble(argv[i+1],
(char **) NULL),exception);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("12",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
const char
*caption;
double
angle;
RandomInfo
*random_info;
/*
Simulate a Polaroid picture.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
if (*option == '-')
{
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
angle=geometry_info.rho;
}
caption=GetImageProperty(*image,"caption",exception);
mogrify_image=PolaroidImage(*image,draw_info,caption,angle,
interpolate_method,exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
/*
Posterize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]),
quantize_info->dither_method,exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
PreviewType
preview_type;
/*
Preview image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
preview_type=UndefinedPreview;
else
preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
mogrify_image=PreviewImage(*image,preview_type,exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a profile from the image.
*/
(void) ProfileImage(*image,argv[i+1],(const unsigned char *)
NULL,0,exception);
break;
}
/*
Associate a profile with the image.
*/
profile_info=CloneImageInfo(mogrify_info);
profile=GetImageProfile(*image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,argv[i+1],exception);
profile_info=DestroyImageInfo(profile_info);
if (profile_image == (Image *) NULL)
{
StringInfo
*file_data;
profile_info=CloneImageInfo(mogrify_info);
(void) CopyMagickString(profile_info->filename,argv[i+1],
MagickPathExtent);
file_data=FileToStringInfo(profile_info->filename,~0UL,
exception);
if (file_data != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,exception);
(void) ProfileImage(*image,profile_info->magick,
GetStringInfoDatum(file_data),
GetStringInfoLength(file_data),exception);
file_data=DestroyStringInfo(file_data);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(*image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),exception);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quantize",option+1) == 0)
{
if (*option == '+')
{
quantize_info->colorspace=UndefinedColorspace;
break;
}
quantize_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("rotational-blur",option+1) == 0)
{
/*
Rotational blur image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=RotationalBlurImage(*image,geometry_info.rho,
exception);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
/*
Surround image with a raise of solid color.
*/
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue :
MagickFalse,exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
/*
Random threshold image.
*/
double
min_threshold,
max_threshold;
(void) SyncImageSettings(mogrify_info,*image,exception);
min_threshold=0.0;
max_threshold=(double) QuantumRange;
flags=ParseGeometry(argv[i+1],&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(argv[i+1],'%') != (char *) NULL)
{
max_threshold*=(double) (0.01*QuantumRange);
min_threshold*=(double) (0.01*QuantumRange);
}
(void) RandomThresholdImage(*image,min_threshold,max_threshold,
exception);
break;
}
if (LocaleCompare("range-threshold",option+1) == 0)
{
/*
Range threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=geometry_info.sigma;
if ((flags & PsiValue) == 0)
geometry_info.psi=geometry_info.xi;
if (strchr(argv[i+1],'%') != (char *) NULL)
{
geometry_info.rho*=(double) (0.01*QuantumRange);
geometry_info.sigma*=(double) (0.01*QuantumRange);
geometry_info.xi*=(double) (0.01*QuantumRange);
geometry_info.psi*=(double) (0.01*QuantumRange);
}
(void) RangeThresholdImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,ReadPixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,ReadPixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("region",option+1) == 0)
{
/*
Apply a write mask as defined by a region geometry.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
(void) SetImageRegionMask(*image,WritePixelMask,
(const RectangleInfo *) NULL,exception);
break;
}
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
(void) SetImageRegionMask(*image,WritePixelMask,&geometry,
exception);
break;
}
if (LocaleCompare("render",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image,exception);
draw_info->render=(*option == '+') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image,exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
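            /*
              Reset or set the size and location of the image canvas.
            */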
if (*option == '+')
{
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
break;
}
(void) ResetImagePage(*image,argv[i+1]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/*
Resample image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ResampleImage(*image,geometry_info.rho,
geometry_info.sigma,(*image)->filter,exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
/*
Roll image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) (*image)->columns/100.0;
geometry.y*=(double) (*image)->rows/100.0;
}
mogrify_image=RollImage(*image,geometry.x,geometry.y,exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
char
*rotation;
/*
Check for conditional image rotation.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (strchr(argv[i+1],'>') != (char *) NULL)
if ((*image)->columns <= (*image)->rows)
break;
if (strchr(argv[i+1],'<') != (char *) NULL)
if ((*image)->columns >= (*image)->rows)
break;
/*
Rotate image.
*/
rotation=ConstantString(argv[i+1]);
(void) SubstituteString(&rotation,">","");
(void) SubstituteString(&rotation,"<","");
(void) ParseGeometry(rotation,&geometry_info);
rotation=DestroyString(rotation);
mogrify_image=RotateImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/*
Sample image with pixel replication.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SampleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ScaleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
/*
Selectively blur pixels within a contrast threshold.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=SelectiveBlurImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/*
Break channels into separate images.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=SeparateImages(*image,exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
double
threshold;
/*
Sepia-tone image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=SepiaToneImage(*image,threshold,exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
/*
Segment image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(*image,(*image)->colorspace,
mogrify_info->verbose,geometry_info.rho,geometry_info.sigma,
exception);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
char
*value;
/*
Set image option.
*/
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) DeleteImageOption(mogrify_info,argv[i+1]+7);
(void) DeleteImageArtifact(*image,argv[i+1]+7);
}
else
(void) DeleteImageProperty(*image,argv[i+1]);
break;
}
value=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (value == (char *) NULL)
break;
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value,
exception);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) SetImageOption(image_info,argv[i+1]+7,value);
(void) SetImageOption(mogrify_info,argv[i+1]+7,value);
(void) SetImageArtifact(*image,argv[i+1]+7,value);
}
else
(void) SetImageProperty(*image,argv[i+1],value,exception);
value=DestroyString(value);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
/*
Shade image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
/*
Shadow image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
mogrify_image=ShadowImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),
(ssize_t) ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
/*
Sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
mogrify_image=SharpenImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
/*
Shave the image edges.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ShaveImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
/*
Shear image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ShearImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
/*
Sigmoidal non-linearity contrast control.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
(void) SigmoidalContrastImage(*image,(*option == '-') ?
MagickTrue : MagickFalse,geometry_info.rho,geometry_info.sigma,
exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
/*
Sketch image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SketchImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
double
threshold;
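            /*
              Solarize image: negate all pixels above the threshold level.
            */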
(void) SyncImageSettings(mogrify_info,*image,exception);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) SolarizeImage(*image,threshold,exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
SparseColorMethod
method;
char
*arguments;
/*
              Sparse color interpolated gradient.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
method=(SparseColorMethod) ParseCommandOption(
MagickSparseColorOptions,MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2],
exception);
if (arguments == (char *) NULL)
break;
mogrify_image=SparseColorOption(*image,method,arguments,
option[0] == '+' ? MagickTrue : MagickFalse,exception);
arguments=DestroyString(arguments);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
/*
Splice a solid color into the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SpliceImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
/*
Spread an image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SpreadImage(*image,interpolate_method,
geometry_info.rho,exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
StatisticType
type;
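            /*
              Replace each pixel with the corresponding statistic from the
              neighborhood.
            */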
(void) SyncImageSettings(mogrify_info,*image,exception);
type=(StatisticType) ParseCommandOption(MagickStatisticOptions,
MagickFalse,argv[i+1]);
(void) ParseGeometry(argv[i+2],&geometry_info);
mogrify_image=StatisticImage(*image,type,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
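            /*
              Set the font stretch for rendered text.
            */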
if (*option == '+')
{
draw_info->stretch=UndefinedStretch;
break;
}
draw_info->stretch=(StretchType) ParseCommandOption(
MagickStretchOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
/*
Strip image of profiles and comments.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) StripImage(*image,exception);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
ExceptionInfo
*sans;
PixelInfo
color;
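            /*
              Set the graphic primitive stroke color (or stroke pattern).
            */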
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,
&draw_info->stroke,exception);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(
draw_info->stroke_pattern);
break;
}
sans=AcquireExceptionInfo();
status=QueryColorCompliance(argv[i+1],AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
else
draw_info->stroke=color;
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
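            /*
              Set the graphic primitive stroke width.
            */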
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
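            /*
              Set the font style for rendered text.
            */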
if (*option == '+')
{
draw_info->style=UndefinedStyle;
break;
}
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
/*
Swirl image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SwirlImage(*image,geometry_info.rho,
interpolate_method,exception);
break;
}
break;
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
threshold=(double) QuantumRange/2;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) BilevelImage(*image,threshold,exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
/*
Thumbnail image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
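            /*
              Set the tile image used when filling a graphic primitive.
            */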
if (*option == '+')
{
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
/*
Tint the image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TintImage(*image,argv[i+1],&fill,exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
/*
Affine transform image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=AffineTransformImage(*image,&draw_info->affine,
exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
PixelInfo
target;
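            /*
              Make the target color transparent within the image.
            */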
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) QueryColorCompliance(argv[i+1],AllCompliance,&target,
exception);
(void) TransparentPaintImage(*image,&target,(Quantum)
TransparentAlpha,*option == '-' ? MagickFalse : MagickTrue,
exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
/*
Transpose image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TransposeImage(*image,exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
/*
Transverse image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TransverseImage(*image,exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
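            /*
              Set the quantization color tree depth.
            */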
quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
/*
Trim image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=TrimImage(*image,exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
ImageType
type;
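            /*
              Set the image type.
            */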
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
type=UndefinedType;
else
type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
argv[i+1]);
(*image)->type=UndefinedType;
(void) SetImageType(*image,type,exception);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
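            /*
              Set the annotation bounding box color.
            */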
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&draw_info->undercolor,exception);
break;
}
if (LocaleCompare("unique",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(*image,"identify:unique-colors","true");
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
/*
Unique image colors.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
mogrify_image=UniqueImageColors(*image,exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
/*
Unsharp mask image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
mogrify_image=UnsharpMaskImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,
exception);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
(void) SetImageArtifact(*image,option+1,
*option == '+' ? "false" : "true");
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
/*
Vignette image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*(*image)->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*(*image)->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) (*image)->columns/100.0;
geometry_info.psi*=(double) (*image)->rows/100.0;
}
mogrify_image=VignetteImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),
(ssize_t) ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
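            /*
              Set the virtual pixel access method.
            */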
if (*option == '+')
{
(void) SetImageVirtualPixelMethod(*image,
UndefinedVirtualPixelMethod,exception);
break;
}
(void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]),exception);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
/*
Wave image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=WaveImage(*image,geometry_info.rho,
geometry_info.sigma,interpolate_method,exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
/*
Wavelet denoise image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
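            /*
              Set the font weight for rendered text.
            */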
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,
argv[i+1]);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(argv[i+1]);
draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
/*
White threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image,exception);
(void) WhiteThresholdImage(*image,argv[i+1],exception);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image,exception);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,WritePixelMask,(Image *) NULL,
exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,WritePixelMask,mask,exception);
mask=DestroyImage(mask);
break;
}
break;
}
default:
break;
}
/*
      Replace current image with any image that was generated.
*/
if (mogrify_image != (Image *) NULL)
ReplaceImageInListReturnLast(image,mogrify_image);
i+=count;
}
/*
Free resources.
*/
quantize_info=DestroyQuantizeInfo(quantize_info);
draw_info=DestroyDrawInfo(draw_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageCommand() transforms an image or a sequence of images. These
% transforms include image scaling, image rotation, color reduction, and
% others. The transmogrified image overwrites the original image.
%
% The format of the MogrifyImageCommand method is:
%
% MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc,
% const char **argv,char **metadata,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: A text array containing the command line arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
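/*
  Illustrative sketch (not part of the original source): a minimal caller of
  MogrifyImageCommand().  All functions used below are public MagickCore API;
  error handling is elided for brevity:

    ExceptionInfo
      *exception;

    ImageInfo
      *image_info;

    MagickBooleanType
      status;

    MagickCoreGenesis(*argv,MagickTrue);
    exception=AcquireExceptionInfo();
    image_info=AcquireImageInfo();
    status=MogrifyImageCommand(image_info,argc,argv,(char **) NULL,exception);
    image_info=DestroyImageInfo(image_info);
    exception=DestroyExceptionInfo(exception);
    MagickCoreTerminus();
*/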
static MagickBooleanType MogrifyUsage(void)
{
static const char
channel_operators[] =
" -channel-fx expression\n"
" exchange, extract, or transfer one or more image channels\n"
" -separate separate an image channel into a grayscale image",
miscellaneous[] =
" -debug events display copious debugging information\n"
" -distribute-cache port\n"
" distributed pixel cache spanning one or more servers\n"
" -help print program options\n"
" -list type print a list of supported option arguments\n"
" -log format format of debugging information\n"
" -version print version information",
operators[] =
" -adaptive-blur geometry\n"
" adaptively blur pixels; decrease effect near edges\n"
" -adaptive-resize geometry\n"
" adaptively resize image using 'mesh' interpolation\n"
" -adaptive-sharpen geometry\n"
" adaptively sharpen pixels; increase effect near edges\n"
" -alpha option on, activate, off, deactivate, set, opaque, copy\n"
" transparent, extract, background, or shape\n"
" -annotate geometry text\n"
" annotate the image with text\n"
" -auto-gamma automagically adjust gamma level of image\n"
" -auto-level automagically adjust color levels of image\n"
" -auto-orient automagically orient (rotate) image\n"
" -auto-threshold method\n"
" automatically perform image thresholding\n"
" -bench iterations measure performance\n"
" -black-threshold value\n"
" force all pixels below the threshold into black\n"
" -blue-shift simulate a scene at nighttime in the moonlight\n"
" -blur geometry reduce image noise and reduce detail levels\n"
" -border geometry surround image with a border of color\n"
" -bordercolor color border color\n"
" -brightness-contrast geometry\n"
" improve brightness / contrast of the image\n"
" -canny geometry detect edges in the image\n"
" -cdl filename color correct with a color decision list\n"
" -channel mask set the image channel mask\n"
" -charcoal geometry simulate a charcoal drawing\n"
" -chop geometry remove pixels from the image interior\n"
" -clahe geometry contrast limited adaptive histogram equalization\n"
" -clamp keep pixel values in range (0-QuantumRange)\n"
" -clip clip along the first path from the 8BIM profile\n"
" -clip-mask filename associate a clip mask with the image\n"
" -clip-path id clip along a named path from the 8BIM profile\n"
" -colorize value colorize the image with the fill color\n"
" -color-matrix matrix apply color correction to the image\n"
" -connected-components connectivity\n"
" connected-components uniquely labeled\n"
" -contrast enhance or reduce the image contrast\n"
" -contrast-stretch geometry\n"
" improve contrast by 'stretching' the intensity range\n"
" -convolve coefficients\n"
" apply a convolution kernel to the image\n"
" -cycle amount cycle the image colormap\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -deskew threshold straighten an image\n"
" -despeckle reduce the speckles within an image\n"
" -distort method args\n"
" distort images according to given method ad args\n"
" -draw string annotate the image with a graphic primitive\n"
" -edge radius apply a filter to detect edges in the image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -emboss radius emboss an image\n"
" -enhance apply a digital filter to enhance a noisy image\n"
" -equalize perform histogram equalization to an image\n"
" -evaluate operator value\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -extent geometry set the image size\n"
" -extract geometry extract area from image\n"
" -fft implements the discrete Fourier transform (DFT)\n"
" -flip flip image vertically\n"
" -floodfill geometry color\n"
" floodfill the image with color\n"
" -flop flop image horizontally\n"
" -frame geometry surround image with an ornamental border\n"
" -function name parameters\n"
" apply function over image values\n"
" -gamma value level of gamma correction\n"
" -gaussian-blur geometry\n"
" reduce image noise and reduce detail levels\n"
" -geometry geometry preferred size or location of the image\n"
" -grayscale method convert image to grayscale\n"
" -hough-lines geometry\n"
" identify lines in the image\n"
" -identify identify the format and characteristics of the image\n"
" -ift implements the inverse discrete Fourier transform (DFT)\n"
" -implode amount implode image pixels about the center\n"
" -interpolative-resize geometry\n"
" resize image using interpolation\n"
" -kuwahara geometry edge preserving noise reduction filter\n"
" -lat geometry local adaptive thresholding\n"
" -level value adjust the level of image contrast\n"
" -level-colors color,color\n"
" level image with the given colors\n"
" -linear-stretch geometry\n"
" improve contrast by 'stretching with saturation'\n"
" -liquid-rescale geometry\n"
" rescale image with seam-carving\n"
" -local-contrast geometry\n"
" enhance local contrast\n"
" -magnify double the size of the image with pixel art scaling\n"
" -mean-shift geometry delineate arbitrarily shaped clusters in the image\n"
" -median geometry apply a median filter to the image\n"
" -mode geometry make each pixel the 'predominant color' of the\n"
" neighborhood\n"
" -modulate value vary the brightness, saturation, and hue\n"
" -monochrome transform image to black and white\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -motion-blur geometry\n"
" simulate motion blur\n"
" -negate replace every pixel with its complementary color \n"
" -noise geometry add or reduce noise in an image\n"
" -normalize transform image to span the full range of colors\n"
" -opaque color change this color to the fill color\n"
" -ordered-dither NxN\n"
" add a noise pattern to the image with specific\n"
" amplitudes\n"
" -paint radius simulate an oil painting\n"
" -perceptible epsilon\n"
" pixel value less than |epsilon| become epsilon or\n"
" -epsilon\n"
" -polaroid angle simulate a Polaroid picture\n"
" -posterize levels reduce the image to a limited number of color levels\n"
" -profile filename add, delete, or apply an image profile\n"
" -quantize colorspace reduce colors in this colorspace\n"
" -raise value lighten/darken image edges to create a 3-D effect\n"
" -random-threshold low,high\n"
" random threshold the image\n"
" -range-threshold values\n"
" perform either hard or soft thresholding within some range of values in an image\n"
" -region geometry apply options to a portion of the image\n"
" -render render vector graphics\n"
" -repage geometry size and location of an image canvas\n"
" -resample geometry change the resolution of an image\n"
" -resize geometry resize the image\n"
" -roll geometry roll an image vertically or horizontally\n"
" -rotate degrees apply Paeth rotation to the image\n"
" -rotational-blur angle\n"
" rotational blur the image\n"
" -sample geometry scale image with pixel sampling\n"
" -scale geometry scale the image\n"
" -segment values segment an image\n"
" -selective-blur geometry\n"
" selectively blur pixels within a contrast threshold\n"
" -sepia-tone threshold\n"
" simulate a sepia-toned photo\n"
" -set property value set an image property\n"
" -shade degrees shade the image using a distant light source\n"
" -shadow geometry simulate an image shadow\n"
" -sharpen geometry sharpen the image\n"
" -shave geometry shave pixels from the image edges\n"
" -shear geometry slide one edge of the image along the X or Y axis\n"
" -sigmoidal-contrast geometry\n"
" increase the contrast without saturating highlights or\n"
" shadows\n"
" -sketch geometry simulate a pencil sketch\n"
" -solarize threshold negate all pixels above the threshold level\n"
" -sparse-color method args\n"
" fill in a image based on a few color points\n"
" -splice geometry splice the background color into the image\n"
" -spread radius displace image pixels by a random amount\n"
" -statistic type radius\n"
" replace each pixel with corresponding statistic from the neighborhood\n"
" -strip strip image of all profiles and comments\n"
" -swirl degrees swirl image pixels about the center\n"
" -threshold value threshold the image\n"
" -thumbnail geometry create a thumbnail of the image\n"
" -tile filename tile image when filling a graphic primitive\n"
" -tint value tint the image with the fill color\n"
" -transform affine transform image\n"
" -transparent color make this color transparent within the image\n"
" -transpose flip image vertically and rotate 90 degrees\n"
" -transverse flop image horizontally and rotate 270 degrees\n"
" -trim trim image edges\n"
" -type type image type\n"
" -unique-colors discard all but one of any pixel color\n"
" -unsharp geometry sharpen the image\n"
" -vignette geometry soften the edges of the image in vignette style\n"
" -wave geometry alter an image along a sine wave\n"
" -wavelet-denoise threshold\n"
" removes noise from the image using a wavelet transform\n"
" -white-threshold value\n"
" force all pixels above the threshold into white",
sequence_operators[] =
" -affinity filename transform image colors to match this set of colors\n"
" -append append an image sequence\n"
" -clut apply a color lookup table to the image\n"
" -coalesce merge a sequence of images\n"
" -combine combine a sequence of images\n"
" -compare mathematically and visually annotate the difference between an image and its reconstruction\n"
" -complex operator perform complex mathematics on an image sequence\n"
" -composite composite image\n"
" -copy geometry offset\n"
" copy pixels from one area of an image to another\n"
" -crop geometry cut out a rectangular region of the image\n"
" -deconstruct break down an image sequence into constituent parts\n"
" -evaluate-sequence operator\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -flatten flatten a sequence of images\n"
" -fx expression apply mathematical expression to an image channel(s)\n"
" -hald-clut apply a Hald color lookup table to the image\n"
" -layers method optimize, merge, or compare image layers\n"
" -morph value morph an image sequence\n"
" -mosaic create a mosaic from an image sequence\n"
" -poly terms build a polynomial from the image sequence and the corresponding\n"
" terms (coefficients and degree pairs).\n"
" -print string interpret string and print to console\n"
" -process arguments process the image with a custom image filter\n"
" -smush geometry smush an image sequence together\n"
" -write filename write images to this file",
settings[] =
" -adjoin join images into a single multi-image file\n"
" -affine matrix affine transform matrix\n"
" -alpha option activate, deactivate, reset, or set the alpha channel\n"
" -antialias remove pixel-aliasing\n"
" -authenticate password\n"
" decipher image with this password\n"
" -attenuate value lessen (or intensify) when adding noise to an image\n"
" -background color background color\n"
" -bias value add bias when convolving an image\n"
" -black-point-compensation\n"
" use black point compensation\n"
" -blue-primary point chromaticity blue primary point\n"
" -bordercolor color border color\n"
" -caption string assign a caption to an image\n"
" -colors value preferred number of colors in the image\n"
" -colorspace type alternate image colorspace\n"
" -comment string annotate image with comment\n"
" -compose operator set image composite operator\n"
" -compress type type of pixel compression when writing the image\n"
" -define format:option=value\n"
" define one or more image format options\n"
" -delay value display the next image after pausing\n"
" -density geometry horizontal and vertical density of the image\n"
" -depth value image depth\n"
" -direction type render text right-to-left or left-to-right\n"
" -display server get image or font from this X server\n"
" -dispose method layer disposal method\n"
" -dither method apply error diffusion to image\n"
" -encoding type text encoding type\n"
" -endian type endianness (MSB or LSB) of the image\n"
" -family name render text with this font family\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fill color color to use when filling a graphic primitive\n"
" -filter type use this filter when resizing an image\n"
" -font name render text with this font\n"
" -format \"string\" output formatted image characteristics\n"
" -fuzz distance colors within this distance are considered equal\n"
" -gravity type horizontal and vertical text placement\n"
" -green-primary point chromaticity green primary point\n"
" -intensity method method to generate an intensity value from a pixel\n"
" -intent type type of rendering intent when managing the image color\n"
" -interlace type type of image interlacing scheme\n"
" -interline-spacing value\n"
" set the space between two text lines\n"
" -interpolate method pixel color interpolation method\n"
" -interword-spacing value\n"
" set the space between two words\n"
" -kerning value set the space between two letters\n"
" -label string assign a label to an image\n"
" -limit type value pixel cache resource limit\n"
" -loop iterations add Netscape loop extension to your GIF animation\n"
" -matte store matte channel if the image has one\n"
" -mattecolor color frame color\n"
" -monitor monitor progress\n"
" -orient type image orientation\n"
" -page geometry size and location of an image canvas (setting)\n"
" -path path write images to this path on disk\n"
" -ping efficiently determine image attributes\n"
" -pointsize value font point size\n"
" -precision value maximum number of significant digits to print\n"
" -preview type image preview type\n"
" -quality value JPEG/MIFF/PNG compression level\n"
" -quiet suppress all warning messages\n"
" -read-mask filename associate a read mask with the image\n"
" -red-primary point chromaticity red primary point\n"
" -regard-warnings pay attention to warning messages\n"
" -remap filename transform image colors to match this set of colors\n"
" -respect-parentheses settings remain in effect until parenthesis boundary\n"
" -sampling-factor geometry\n"
" horizontal and vertical sampling factor\n"
" -scene value image scene number\n"
" -seed value seed a new sequence of pseudo-random numbers\n"
" -size geometry width and height of image\n"
" -stretch type render text with this font stretch\n"
" -stroke color graphic primitive stroke color\n"
" -strokewidth value graphic primitive stroke width\n"
" -style type render text with this font style\n"
" -synchronize synchronize image to storage device\n"
" -taint declare the image as modified\n"
" -texture filename name of texture to tile onto the image background\n"
" -tile-offset geometry\n"
" tile offset\n"
" -treedepth value color tree depth\n"
" -transparent-color color\n"
" transparent color\n"
" -undercolor color annotation bounding box color\n"
" -units type the units of image resolution\n"
" -verbose print detailed information about the image\n"
" -view FlashPix viewing transforms\n"
" -virtual-pixel method\n"
" virtual pixel access method\n"
" -weight type render text with this font weight\n"
" -white-point point chromaticity white point\n"
" -write-mask filename associate a write mask with the image",
stack_operators[] =
" -delete indexes delete the image from the image sequence\n"
" -duplicate count,indexes\n"
" duplicate an image one or more times\n"
" -insert index insert last image into the image sequence\n"
" -reverse reverse image sequence\n"
" -swap indexes swap two images in the image sequence";
ListMagickVersion(stdout);
(void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n",
GetClientName());
(void) printf("\nImage Settings:\n");
(void) puts(settings);
(void) printf("\nImage Operators:\n");
(void) puts(operators);
(void) printf("\nImage Channel Operators:\n");
(void) puts(channel_operators);
(void) printf("\nImage Sequence Operators:\n");
(void) puts(sequence_operators);
(void) printf("\nImage Stack Operators:\n");
(void) puts(stack_operators);
(void) printf("\nMiscellaneous Options:\n");
(void) puts(miscellaneous);
(void) printf(
"\nBy default, the image format of 'file' is determined by its magic\n");
(void) printf(
"number. To specify a particular image format, precede the filename\n");
(void) printf(
"with an image format name and a colon (i.e. ps:image) or specify the\n");
(void) printf(
"image type as the filename suffix (i.e. image.ps). Specify 'file' as\n");
(void) printf("'-' for standard input or output.\n");
return(MagickFalse);
}
WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,
int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception)
{
#define DestroyMogrify() \
{ \
if (format != (char *) NULL) \
format=DestroyString(format); \
if (path != (char *) NULL) \
path=DestroyString(path); \
DestroyImageStack(); \
for (i=0; i < (ssize_t) argc; i++) \
argv[i]=DestroyString(argv[i]); \
argv=(char **) RelinquishMagickMemory(argv); \
}
#define ThrowMogrifyException(asperity,tag,option) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \
option); \
DestroyMogrify(); \
return(MagickFalse); \
}
#define ThrowMogrifyInvalidArgumentException(option,argument) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),OptionError, \
"InvalidArgument","'%s': %s",argument,option); \
DestroyMogrify(); \
return(MagickFalse); \
}
char
*format,
*option,
*path;
Image
*image;
ImageStack
image_stack[MaxImageStackDepth+1];
MagickBooleanType
global_colormap;
MagickBooleanType
fire,
pend,
respect_parenthesis;
MagickStatusType
status;
register ssize_t
i;
ssize_t
j,
k;
wand_unreferenced(metadata);
/*
Set defaults.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(exception != (ExceptionInfo *) NULL);
if (argc == 2)
{
option=argv[1];
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
return(MagickTrue);
}
}
if (argc < 2)
return(MogrifyUsage());
format=(char *) NULL;
path=(char *) NULL;
global_colormap=MagickFalse;
k=0;
j=1;
NewImageStack();
option=(char *) NULL;
pend=MagickFalse;
respect_parenthesis=MagickFalse;
status=MagickTrue;
/*
Parse command line.
*/
ReadCommandlLine(argc,&argv);
status=ExpandFilenames(&argc,&argv);
if (status == MagickFalse)
ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
for (i=1; i < (ssize_t) argc; i++)
{
option=argv[i];
if (LocaleCompare(option,"(") == 0)
{
FireImageStack(MagickFalse,MagickTrue,pend);
if (k == MaxImageStackDepth)
ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply",
option);
PushImageStack();
continue;
}
if (LocaleCompare(option,")") == 0)
{
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
if (k == 0)
ThrowMogrifyException(OptionError,"UnableToParseExpression",option);
PopImageStack();
continue;
}
if (IsCommandOption(option) == MagickFalse)
{
char
backup_filename[MagickPathExtent],
*filename;
Image
*images;
struct stat
properties;
/*
Option is a file name: begin by reading image from specified file.
*/
FireImageStack(MagickFalse,MagickFalse,pend);
filename=argv[i];
if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1)))
filename=argv[++i];
images=ReadImages(image_info,filename,exception);
status&=(images != (Image *) NULL) &&
(exception->severity < ErrorException);
if (images == (Image *) NULL)
continue;
properties=(*GetBlobProperties(images));
if (format != (char *) NULL)
(void) CopyMagickString(images->filename,images->magick_filename,
MagickPathExtent);
if (path != (char *) NULL)
{
GetPathComponent(option,TailPath,filename);
(void) FormatLocaleString(images->filename,MagickPathExtent,
"%s%c%s",path,*DirectorySeparator,filename);
}
if (format != (char *) NULL)
AppendImageFormat(format,images->filename);
AppendImageStack(images);
FinalizeImageSettings(image_info,image,MagickFalse);
if (global_colormap != MagickFalse)
{
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo(image_info);
(void) RemapImages(quantize_info,images,(Image *) NULL,exception);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
*backup_filename='\0';
if ((LocaleCompare(image->filename,"-") != 0) &&
(IsPathWritable(image->filename) != MagickFalse))
{
/*
Rename image file as backup.
*/
(void) CopyMagickString(backup_filename,image->filename,
MagickPathExtent);
for (j=0; j < 6; j++)
{
(void) ConcatenateMagickString(backup_filename,"~",
MagickPathExtent);
if (IsPathAccessible(backup_filename) == MagickFalse)
break;
}
if ((IsPathAccessible(backup_filename) != MagickFalse) ||
(rename_utf8(image->filename,backup_filename) != 0))
*backup_filename='\0';
}
/*
Write transmogrified image to disk.
*/
image_info->synchronize=MagickTrue;
status&=WriteImages(image_info,image,image->filename,exception);
if (status != MagickFalse)
{
#if defined(MAGICKCORE_HAVE_UTIME)
{
MagickBooleanType
preserve_timestamp;
preserve_timestamp=IsStringTrue(GetImageOption(image_info,
"preserve-timestamp"));
if (preserve_timestamp != MagickFalse)
{
struct utimbuf
timestamp;
timestamp.actime=properties.st_atime;
timestamp.modtime=properties.st_mtime;
                    (void) utime(image->filename,&timestamp);
}
}
#endif
if (*backup_filename != '\0')
(void) remove_utf8(backup_filename);
}
RemoveAllImageStack();
continue;
}
pend=image != (Image *) NULL ? MagickTrue : MagickFalse;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,
argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedAlphaChannelOption",argv[i]);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
            if (i == (ssize_t) (argc-1))
ThrowMogrifyException(OptionError,"MissingArgument",option);
i++;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
break;
if (LocaleCompare("append",option+1) == 0)
break;
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
break;
if (LocaleCompare("auto-level",option+1) == 0)
break;
if (LocaleCompare("auto-orient",option+1) == 0)
break;
if (LocaleCompare("auto-threshold",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickAutoThresholdOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedThresholdMethod",
argv[i]);
break;
}
if (LocaleCompare("average",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
break;
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("canny",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParseChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("channel-fx",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParsePixelChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
break;
if (LocaleCompare("clip",option+1) == 0)
break;
if (LocaleCompare("clip-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("clut",option+1) == 0)
break;
if (LocaleCompare("coalesce",option+1) == 0)
break;
if (LocaleCompare("colorize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("compare",option+1) == 0)
break;
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("composite",option+1) == 0)
break;
if (LocaleCompare("compress",option+1) == 0)
{
ssize_t
compress;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
compress=ParseCommandOption(MagickCompressOptions,MagickFalse,
argv[i]);
if (compress < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageCompression",
argv[i]);
break;
}
if (LocaleCompare("concurrent",option+1) == 0)
break;
if (LocaleCompare("connected-components",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
break;
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("deconstruct",option+1) == 0)
break;
if (LocaleCompare("debug",option+1) == 0)
{
ssize_t
event;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]);
if (event < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEventType",
argv[i]);
(void) SetLogEventMask(argv[i]);
break;
}
if (LocaleCompare("define",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
const char
*define;
define=GetImageOption(image_info,argv[i]);
if (define == (const char *) NULL)
ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]);
break;
}
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
break;
if (LocaleCompare("dft",option+1) == 0)
break;
if (LocaleCompare("direction",option+1) == 0)
{
ssize_t
direction;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
argv[i]);
if (direction < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDirectionType",
argv[i]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
ssize_t
dispose;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
argv[i]);
if (dispose < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod",
argv[i]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod",
argv[i]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("duration",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
ssize_t
endian;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]);
if (endian < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEndianType",
argv[i]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
break;
if (LocaleCompare("equalize",option+1) == 0)
break;
if (LocaleCompare("evaluate",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
ssize_t
filter;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]);
if (filter < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageFilter",
argv[i]);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
break;
if (LocaleCompare("flip",option+1) == 0)
break;
if (LocaleCompare("flop",option+1) == 0)
break;
if (LocaleCompare("floodfill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
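            (void) CopyMagickString(argv[i]+1,"sans",MagickPathExtent);
            /*
              The option text is overwritten with "sans" above, presumably
              a no-op placeholder so that later passes over argv do not
              reprocess -format here; the format itself is validated below.
            */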
(void) CloneString(&format,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&format,argv[i]);
(void) CopyMagickString(image_info->filename,format,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,":",
MagickPathExtent);
(void) SetImageInfo(image_info,0,exception);
if (*image_info->magick == '\0')
ThrowMogrifyException(OptionError,"UnrecognizedImageFormat",
format);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
ssize_t
gravity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
argv[i]);
if (gravity < 0)
ThrowMogrifyException(OptionError,"UnrecognizedGravityType",
argv[i]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod",
argv[i]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
break;
if ((LocaleCompare("help",option+1) == 0) ||
(LocaleCompare("-help",option+1) == 0))
return(MogrifyUsage());
if (LocaleCompare("hough-lines",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
break;
if (LocaleCompare("idft",option+1) == 0)
break;
if (LocaleCompare("implode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("intensity",option+1) == 0)
{
ssize_t
intensity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,argv[i]);
if (intensity < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedPixelIntensityMethod",argv[i]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
ssize_t
intent;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]);
if (intent < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntentType",
argv[i]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
ssize_t
interlace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse,
argv[i]);
if (interlace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType",
argv[i]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
ssize_t
interpolate;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse,
argv[i]);
if (interpolate < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod",
argv[i]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("lat",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
            if (IsGeometry(argv[i]) == MagickFalse)
              ThrowMogrifyInvalidArgumentException(option,argv[i]);
            break;
          }
if (LocaleCompare("layers",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod",
argv[i]);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
char
*p;
double
value;
ssize_t
resource;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
resource=ParseCommandOption(MagickResourceOptions,MagickFalse,
argv[i]);
if (resource < 0)
ThrowMogrifyException(OptionError,"UnrecognizedResourceType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
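            /*
              StringToDouble() leaves p pointing at argv[i] when no digits
              are consumed; in that case only the literal "unlimited" is an
              acceptable resource value.
            */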
value=StringToDouble(argv[i],&p);
(void) value;
if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0))
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]);
if (list < 0)
ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]);
status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **)
argv+j,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
i++;
if ((i == (ssize_t) argc) ||
(strchr(argv[i],'%') == (char *) NULL))
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
break;
if (LocaleCompare("map",option+1) == 0)
{
global_colormap=(*option == '+') ? MagickTrue : MagickFalse;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("matte",option+1) == 0)
break;
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("maximum",option+1) == 0)
break;
if (LocaleCompare("mean-shift",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMetricType",
argv[i]);
break;
}
if (LocaleCompare("minimum",option+1) == 0)
break;
if (LocaleCompare("modulate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
break;
if (LocaleCompare("monochrome",option+1) == 0)
break;
if (LocaleCompare("morph",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
KernelInfo
*kernel_info;
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
GetNextToken(argv[i],(const char **) NULL,MagickPathExtent,token);
op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod",
token);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i],exception);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
break;
if (LocaleCompare("motion-blur",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
break;
if (LocaleCompare("noise",option+1) == 0)
{
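            /*
              Note the asymmetry: +noise takes the type of noise to add,
              while -noise takes a geometry (the radius of the
              noise-reduction filter).
            */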
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
ssize_t
noise;
noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,
argv[i]);
if (noise < 0)
ThrowMogrifyException(OptionError,"UnrecognizedNoiseType",
argv[i]);
break;
}
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("noop",option+1) == 0)
break;
if (LocaleCompare("normalize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("orient",option+1) == 0)
{
ssize_t
orientation;
orientation=UndefinedOrientation;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse,
argv[i]);
if (orientation < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("paint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("path",option+1) == 0)
{
(void) CloneString(&path,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&path,argv[i]);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("poly",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'r':
{
if (LocaleCompare("rotational-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("range-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
            if (IsGeometry(argv[i]) == MagickFalse)
              ThrowMogrifyInvalidArgumentException(option,argv[i]);
            break;
          }
if (LocaleCompare("regard-warnings",option+1) == 0)
break;
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("render",option+1) == 0)
break;
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleNCompare("respect-parentheses",option+1,17) == 0)
{
respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("reverse",option+1) == 0)
break;
if (LocaleCompare("roll",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sampling-factor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("separate",option+1) == 0)
break;
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("smush",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStatisticType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
ssize_t
stretch;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,
argv[i]);
if (stretch < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
break;
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
ssize_t
style;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]);
if (style < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
break;
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transform",option+1) == 0)
break;
if (LocaleCompare("transpose",option+1) == 0)
break;
if (LocaleCompare("transverse",option+1) == 0)
break;
if (LocaleCompare("threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
break;
if (LocaleCompare("type",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageType",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
break;
if (LocaleCompare("units",option+1) == 0)
{
ssize_t
units;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
argv[i]);
if (units < 0)
ThrowMogrifyException(OptionError,"UnrecognizedUnitsType",
argv[i]);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedVirtualPixelMethod",argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("write",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case '?':
break;
default:
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
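    /*
      Options carrying the fire flag force the pending image stack to be
      processed before parsing continues.
    */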
fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) &
FireOptionFlag) == 0 ? MagickFalse : MagickTrue;
if (fire != MagickFalse)
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
}
if (k != 0)
ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]);
if (i != (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]);
DestroyMogrify();
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageInfo() applies image processing settings to the image as
% prescribed by command line options.
%
% The format of the MogrifyImageInfo method is:
%
% MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
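%  For example (a hypothetical invocation, not taken from this file):
%
%      const char *settings[] = { "-quality", "85", "-colorspace", "sRGB" };
%      (void) MogrifyImageInfo(image_info,4,settings,exception);
%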
*/
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
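    /*
      count is the number of arguments the option consumes; skip any
      option whose arguments would run past the end of argv.
    */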
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorCompliance(MogrifyBackgroundColor,
AllCompliance,&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->background_color,exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"convolve:bias","0.0");
break;
}
(void) SetImageOption(image_info,"convolve:bias",argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&image_info->border_color,exception);
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->border_color,exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
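            /*
              -cache bounds the pixel cache: memory is limited to the
              given value and the memory-mapped cache to twice that.
            */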
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],
100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
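            /*
              Keys prefixed with "registry:" live in the process-wide
              image registry; all other keys become image options.
            */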
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
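            /*
              Escapes such as %k (unique colors) or %@ (bounding box)
              require the pixel data, so disable ping (header-only) reads
              when the format string references them.
            */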
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(MogrifyAlphaColor,AllCompliance,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->matte_color,exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
      break;
    }
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MagickPathExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
flags=ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorCompliance("none",AllCompliance,
&image_info->transparent_color,exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorCompliance(argv[i+1],AllCompliance,
&image_info->transparent_color,exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
(void) DeleteImageOption(image_info,option+1);
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"undefined");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0.0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
i+=count;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageList() applies any command line options that might affect the
% entire image list (e.g. -append, -coalesce, etc.).
%
%  The format of the MogrifyImageList method is:
%
% MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc,
% const char **argv,Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
%    o images: pointer to a pointer of the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
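/*
  Editorial sketch (not part of the original source): a minimal, hypothetical
  caller of MogrifyImageList().  The filename and the single "-coalesce"
  operator are assumptions chosen for illustration; any option that operates
  on the whole sequence would serve.

    ImageInfo *image_info = AcquireImageInfo();
    ExceptionInfo *exception = AcquireExceptionInfo();
    const char *argv[] = { "-coalesce" };
    Image *images;

    (void) CopyMagickString(image_info->filename,"animation.gif",
      MagickPathExtent);
    images=ReadImage(image_info,exception);
    if (images != (Image *) NULL)
      {
        (void) MogrifyImageList(image_info,1,argv,&images,exception);
        images=DestroyImageList(images);
      }
    image_info=DestroyImageInfo(image_info);
    exception=DestroyExceptionInfo(exception);
*/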
WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
PixelInterpolateMethod
interpolate_method;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
interpolate_method=UndefinedInterpolatePixel;
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images,exception);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
Image
*channel_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
channel_image=ChannelFxImage(*images,argv[i+1],exception);
if (channel_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=channel_image;
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) ClutImage(image,clut_image,interpolate_method,exception);
clut_image=DestroyImage(clut_image);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
ColorspaceType
colorspace;
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
colorspace=(*images)->colorspace;
if ((*images)->number_channels < GetImageListLength(*images))
colorspace=sRGBColorspace;
if (*option == '+')
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
combine_image=CombineImages(*images,colorspace,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(mogrify_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImages(image,reconstruct_image,metric,
&distortion,exception);
if (difference_image == (Image *) NULL)
break;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images,exception);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
        const char
          *value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*new_images,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
(void) SyncImageSettings(mogrify_info,*images,exception);
value=GetImageOption(mogrify_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(mogrify_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(mogrify_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(images);
source_image=RemoveFirstImageFromList(images);
if (source_image == (Image *) NULL)
break; /* FUTURE - produce Exception, rather than silent fail */
/* FUTURE: this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity,&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
}
else
{
Image
*clone_image;
clone_image=CloneImage(new_images,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,exception);
status&=CompositeImage(clone_image,new_images,
OverCompositeOp,clip_to_self,0,0,exception);
new_images=DestroyImageList(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
*images=DestroyImageList(*images);
*images=new_images;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images,exception);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
deconstruct_image=CompareImagesLayers(*images,CompareAnyLayer,
exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither_method=NoDitherMethod;
break;
}
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
          AppendImageToList(images,duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images,exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images,exception);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images,exception);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
fx_image=FxImage(*images,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) HaldClutImage(image,hald_image,exception);
hald_image=DestroyImage(hald_image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
            Implements the inverse discrete Fourier transform (DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
p=DestroyImage(p);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
interpolate_method=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
LayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images,exception);
layers=(Image *) NULL;
method=(LayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImagesLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
                  General purpose GIF animation optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images,exception);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL,
exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images,exception);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images,exception);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MagickPathExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
(void) SyncImageSettings(mogrify_info,*images,exception);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1],
exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == ',')
GetNextToken(p,&p,MagickPathExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImage(*images,number_arguments >> 1,
arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
            break;
          }
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images,exception);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1],
exception);
if (string == (char *) NULL)
break;
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
            break;
          }
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images,exception);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
if ((argc > 1) && (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*argument;
int
next,
token_status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
argument=argv[i+1];
token_info=AcquireTokenInfo();
token_status=Tokenizer(token_info,0,token,length,argument,"",
"=","\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (token_status == 0)
{
const char
*arg;
arg=(&(argument[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&arg,
exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images,exception);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MagickPathExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images,exception);
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",
argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImages() applies image processing options to a sequence of images as
% prescribed by command line options.
%
%  The format of the MogrifyImages method is:
%
% MagickBooleanType MogrifyImages(ImageInfo *image_info,
% const MagickBooleanType post,const int argc,const char **argv,
%      Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
% o post: If true, post process image list operators otherwise pre-process.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
%    o images: pointer to a pointer of the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
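/*
  Editorial sketch (not part of the original source): a hypothetical
  invocation mirroring how the mogrify command drives this routine.  The
  "-resize 50%" option pair is an assumption for illustration.  With
  post == MagickFalse, list operators run before the per-image pass; with
  post == MagickTrue, they run after it.

    const char *argv[] = { "-resize", "50%" };

    (void) MogrifyImages(image_info,MagickFalse,2,argv,&images,exception);
*/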
WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info,
const MagickBooleanType post,const int argc,const char **argv,
Image **images,ExceptionInfo *exception)
{
#define MogrifyImageTag "Mogrify/Image"
MagickStatusType
status;
MagickBooleanType
proceed;
size_t
n;
register ssize_t
i;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (images == (Image **) NULL)
return(MogrifyImage(image_info,argc,argv,images,exception));
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
(void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL,
(void *) NULL);
status=MagickTrue;
#if 0
(void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc,
post?"post":"pre");
#endif
/*
Pre-process multi-image sequence operators
*/
if (post == MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
/*
For each image, process simple single image operators
*/
i=0;
n=GetImageListLength(*images);
for ( ; ; )
{
#if 0
(void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
status&=MogrifyImage(image_info,argc,argv,images,exception);
proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i, n);
if (proceed == MagickFalse)
break;
    if ((*images)->next == (Image *) NULL)
break;
*images=(*images)->next;
i++;
}
  assert(*images != (Image *) NULL);
#if 0
(void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
/*
    Post-process multi-image sequence operators.
*/
*images=GetFirstImageInList(*images);
if (post != MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_944_0 |
crossvul-cpp_data_bad_1497_0 | /* crypto/cms/cms_smime.c */
/* Written by Dr Stephen N Henson (steve@openssl.org) for the OpenSSL
* project.
*/
/* ====================================================================
* Copyright (c) 2008 The OpenSSL Project. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. All advertising materials mentioning features or use of this
* software must display the following acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
*
* 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
* endorse or promote products derived from this software without
* prior written permission. For written permission, please contact
* licensing@OpenSSL.org.
*
* 5. Products derived from this software may not be called "OpenSSL"
* nor may "OpenSSL" appear in their names without prior written
* permission of the OpenSSL Project.
*
* 6. Redistributions of any form whatsoever must retain the following
* acknowledgment:
* "This product includes software developed by the OpenSSL Project
* for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
*
* THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
* EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
* ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
* OF THE POSSIBILITY OF SUCH DAMAGE.
* ====================================================================
*/
#include "cryptlib.h"
#include <openssl/asn1t.h>
#include <openssl/x509.h>
#include <openssl/x509v3.h>
#include <openssl/err.h>
#include <openssl/cms.h>
#include "cms_lcl.h"
#include "asn1_locl.h"
static int cms_copy_content(BIO *out, BIO *in, unsigned int flags)
{
unsigned char buf[4096];
int r = 0, i;
BIO *tmpout = NULL;
if (out == NULL)
tmpout = BIO_new(BIO_s_null());
else if (flags & CMS_TEXT)
{
tmpout = BIO_new(BIO_s_mem());
BIO_set_mem_eof_return(tmpout, 0);
}
else
tmpout = out;
if(!tmpout)
{
CMSerr(CMS_F_CMS_COPY_CONTENT,ERR_R_MALLOC_FAILURE);
goto err;
}
/* Read all content through chain to process digest, decrypt etc */
for (;;)
{
i=BIO_read(in,buf,sizeof(buf));
if (i <= 0)
{
if (BIO_method_type(in) == BIO_TYPE_CIPHER)
{
if (!BIO_get_cipher_status(in))
goto err;
}
if (i < 0)
goto err;
break;
}
if (tmpout && (BIO_write(tmpout, buf, i) != i))
goto err;
}
if(flags & CMS_TEXT)
{
if(!SMIME_text(tmpout, out))
{
CMSerr(CMS_F_CMS_COPY_CONTENT,CMS_R_SMIME_TEXT_ERROR);
goto err;
}
}
r = 1;
err:
if (tmpout && (tmpout != out))
BIO_free(tmpout);
return r;
}
static int check_content(CMS_ContentInfo *cms)
{
ASN1_OCTET_STRING **pos = CMS_get0_content(cms);
if (!pos || !*pos)
{
CMSerr(CMS_F_CHECK_CONTENT, CMS_R_NO_CONTENT);
return 0;
}
return 1;
}
static void do_free_upto(BIO *f, BIO *upto)
{
if (upto)
{
BIO *tbio;
do
{
tbio = BIO_pop(f);
BIO_free(f);
f = tbio;
}
while (f != upto);
}
else
BIO_free_all(f);
}
int CMS_data(CMS_ContentInfo *cms, BIO *out, unsigned int flags)
{
BIO *cont;
int r;
if (OBJ_obj2nid(CMS_get0_type(cms)) != NID_pkcs7_data)
{
CMSerr(CMS_F_CMS_DATA, CMS_R_TYPE_NOT_DATA);
return 0;
}
cont = CMS_dataInit(cms, NULL);
if (!cont)
return 0;
r = cms_copy_content(out, cont, flags);
BIO_free_all(cont);
return r;
}
CMS_ContentInfo *CMS_data_create(BIO *in, unsigned int flags)
{
CMS_ContentInfo *cms;
cms = cms_Data_create();
if (!cms)
return NULL;
if ((flags & CMS_STREAM) || CMS_final(cms, in, NULL, flags))
return cms;
CMS_ContentInfo_free(cms);
return NULL;
}
int CMS_digest_verify(CMS_ContentInfo *cms, BIO *dcont, BIO *out,
unsigned int flags)
{
BIO *cont;
int r;
if (OBJ_obj2nid(CMS_get0_type(cms)) != NID_pkcs7_digest)
{
CMSerr(CMS_F_CMS_DIGEST_VERIFY, CMS_R_TYPE_NOT_DIGESTED_DATA);
return 0;
}
if (!dcont && !check_content(cms))
return 0;
cont = CMS_dataInit(cms, dcont);
if (!cont)
return 0;
r = cms_copy_content(out, cont, flags);
if (r)
r = cms_DigestedData_do_final(cms, cont, 1);
do_free_upto(cont, dcont);
return r;
}
CMS_ContentInfo *CMS_digest_create(BIO *in, const EVP_MD *md,
unsigned int flags)
{
CMS_ContentInfo *cms;
if (!md)
md = EVP_sha1();
cms = cms_DigestedData_create(md);
if (!cms)
return NULL;
if(!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & CMS_STREAM) || CMS_final(cms, in, NULL, flags))
return cms;
CMS_ContentInfo_free(cms);
return NULL;
}
int CMS_EncryptedData_decrypt(CMS_ContentInfo *cms,
const unsigned char *key, size_t keylen,
BIO *dcont, BIO *out, unsigned int flags)
{
BIO *cont;
int r;
if (OBJ_obj2nid(CMS_get0_type(cms)) != NID_pkcs7_encrypted)
{
CMSerr(CMS_F_CMS_ENCRYPTEDDATA_DECRYPT,
CMS_R_TYPE_NOT_ENCRYPTED_DATA);
return 0;
}
if (!dcont && !check_content(cms))
return 0;
if (CMS_EncryptedData_set1_key(cms, NULL, key, keylen) <= 0)
return 0;
cont = CMS_dataInit(cms, dcont);
if (!cont)
return 0;
r = cms_copy_content(out, cont, flags);
do_free_upto(cont, dcont);
return r;
}
CMS_ContentInfo *CMS_EncryptedData_encrypt(BIO *in, const EVP_CIPHER *cipher,
const unsigned char *key, size_t keylen,
unsigned int flags)
{
CMS_ContentInfo *cms;
if (!cipher)
{
CMSerr(CMS_F_CMS_ENCRYPTEDDATA_ENCRYPT, CMS_R_NO_CIPHER);
return NULL;
}
cms = CMS_ContentInfo_new();
if (!cms)
return NULL;
if (!CMS_EncryptedData_set1_key(cms, cipher, key, keylen))
return NULL;
if(!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & (CMS_STREAM|CMS_PARTIAL))
|| CMS_final(cms, in, NULL, flags))
return cms;
CMS_ContentInfo_free(cms);
return NULL;
}
static int cms_signerinfo_verify_cert(CMS_SignerInfo *si,
X509_STORE *store,
STACK_OF(X509) *certs,
STACK_OF(X509_CRL) *crls,
unsigned int flags)
{
X509_STORE_CTX ctx;
X509 *signer;
int i, j, r = 0;
CMS_SignerInfo_get0_algs(si, NULL, &signer, NULL, NULL);
if (!X509_STORE_CTX_init(&ctx, store, signer, certs))
{
CMSerr(CMS_F_CMS_SIGNERINFO_VERIFY_CERT,
CMS_R_STORE_INIT_ERROR);
goto err;
}
X509_STORE_CTX_set_default(&ctx, "smime_sign");
if (crls)
X509_STORE_CTX_set0_crls(&ctx, crls);
i = X509_verify_cert(&ctx);
if (i <= 0)
{
j = X509_STORE_CTX_get_error(&ctx);
CMSerr(CMS_F_CMS_SIGNERINFO_VERIFY_CERT,
CMS_R_CERTIFICATE_VERIFY_ERROR);
ERR_add_error_data(2, "Verify error:",
X509_verify_cert_error_string(j));
goto err;
}
r = 1;
err:
X509_STORE_CTX_cleanup(&ctx);
return r;
}
int CMS_verify(CMS_ContentInfo *cms, STACK_OF(X509) *certs,
X509_STORE *store, BIO *dcont, BIO *out, unsigned int flags)
{
CMS_SignerInfo *si;
STACK_OF(CMS_SignerInfo) *sinfos;
STACK_OF(X509) *cms_certs = NULL;
STACK_OF(X509_CRL) *crls = NULL;
X509 *signer;
int i, scount = 0, ret = 0;
BIO *cmsbio = NULL, *tmpin = NULL;
if (!dcont && !check_content(cms))
return 0;
/* Attempt to find all signer certificates */
sinfos = CMS_get0_SignerInfos(cms);
if (sk_CMS_SignerInfo_num(sinfos) <= 0)
{
CMSerr(CMS_F_CMS_VERIFY, CMS_R_NO_SIGNERS);
goto err;
}
for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++)
{
si = sk_CMS_SignerInfo_value(sinfos, i);
CMS_SignerInfo_get0_algs(si, NULL, &signer, NULL, NULL);
if (signer)
scount++;
}
if (scount != sk_CMS_SignerInfo_num(sinfos))
scount += CMS_set1_signers_certs(cms, certs, flags);
if (scount != sk_CMS_SignerInfo_num(sinfos))
{
CMSerr(CMS_F_CMS_VERIFY, CMS_R_SIGNER_CERTIFICATE_NOT_FOUND);
goto err;
}
/* Attempt to verify all signers certs */
if (!(flags & CMS_NO_SIGNER_CERT_VERIFY))
{
cms_certs = CMS_get1_certs(cms);
if (!(flags & CMS_NOCRL))
crls = CMS_get1_crls(cms);
for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++)
{
si = sk_CMS_SignerInfo_value(sinfos, i);
if (!cms_signerinfo_verify_cert(si, store,
cms_certs, crls, flags))
goto err;
}
}
/* Attempt to verify all SignerInfo signed attribute signatures */
if (!(flags & CMS_NO_ATTR_VERIFY))
{
for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++)
{
si = sk_CMS_SignerInfo_value(sinfos, i);
if (CMS_signed_get_attr_count(si) < 0)
continue;
if (CMS_SignerInfo_verify(si) <= 0)
goto err;
}
}
/* Performance optimization: if the content is a memory BIO then
* store its contents in a temporary read only memory BIO. This
* avoids potentially large numbers of slow copies of data which will
	 * occur when reading from a read/write memory BIO when signatures
* are calculated.
*/
if (dcont && (BIO_method_type(dcont) == BIO_TYPE_MEM))
{
char *ptr;
long len;
len = BIO_get_mem_data(dcont, &ptr);
tmpin = BIO_new_mem_buf(ptr, len);
if (tmpin == NULL)
{
CMSerr(CMS_F_CMS_VERIFY,ERR_R_MALLOC_FAILURE);
return 0;
}
}
else
tmpin = dcont;
cmsbio=CMS_dataInit(cms, tmpin);
if (!cmsbio)
goto err;
if (!cms_copy_content(out, cmsbio, flags))
goto err;
if (!(flags & CMS_NO_CONTENT_VERIFY))
{
for (i = 0; i < sk_CMS_SignerInfo_num(sinfos); i++)
{
si = sk_CMS_SignerInfo_value(sinfos, i);
if (CMS_SignerInfo_verify_content(si, cmsbio) <= 0)
{
CMSerr(CMS_F_CMS_VERIFY,
CMS_R_CONTENT_VERIFY_ERROR);
goto err;
}
}
}
ret = 1;
err:
if (dcont && (tmpin == dcont))
do_free_upto(cmsbio, dcont);
else
BIO_free_all(cmsbio);
if (cms_certs)
sk_X509_pop_free(cms_certs, X509_free);
if (crls)
sk_X509_CRL_pop_free(crls, X509_CRL_free);
return ret;
}
int CMS_verify_receipt(CMS_ContentInfo *rcms, CMS_ContentInfo *ocms,
STACK_OF(X509) *certs,
X509_STORE *store, unsigned int flags)
{
int r;
flags &= ~(CMS_DETACHED|CMS_TEXT);
r = CMS_verify(rcms, certs, store, NULL, NULL, flags);
if (r <= 0)
return r;
return cms_Receipt_verify(rcms, ocms);
}
CMS_ContentInfo *CMS_sign(X509 *signcert, EVP_PKEY *pkey, STACK_OF(X509) *certs,
BIO *data, unsigned int flags)
{
CMS_ContentInfo *cms;
int i;
cms = CMS_ContentInfo_new();
if (!cms || !CMS_SignedData_init(cms))
goto merr;
if (pkey && !CMS_add1_signer(cms, signcert, pkey, NULL, flags))
{
CMSerr(CMS_F_CMS_SIGN, CMS_R_ADD_SIGNER_ERROR);
goto err;
}
for (i = 0; i < sk_X509_num(certs); i++)
{
X509 *x = sk_X509_value(certs, i);
if (!CMS_add1_cert(cms, x))
goto merr;
}
if(!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & (CMS_STREAM|CMS_PARTIAL))
|| CMS_final(cms, data, NULL, flags))
return cms;
else
goto err;
merr:
CMSerr(CMS_F_CMS_SIGN, ERR_R_MALLOC_FAILURE);
err:
if (cms)
CMS_ContentInfo_free(cms);
return NULL;
}
CMS_ContentInfo *CMS_sign_receipt(CMS_SignerInfo *si,
X509 *signcert, EVP_PKEY *pkey,
STACK_OF(X509) *certs,
unsigned int flags)
{
CMS_SignerInfo *rct_si;
CMS_ContentInfo *cms = NULL;
ASN1_OCTET_STRING **pos, *os;
BIO *rct_cont = NULL;
int r = 0;
flags &= ~(CMS_STREAM|CMS_TEXT);
/* Not really detached but avoids content being allocated */
flags |= CMS_PARTIAL|CMS_BINARY|CMS_DETACHED;
if (!pkey || !signcert)
{
CMSerr(CMS_F_CMS_SIGN_RECEIPT, CMS_R_NO_KEY_OR_CERT);
return NULL;
}
/* Initialize signed data */
cms = CMS_sign(NULL, NULL, certs, NULL, flags);
if (!cms)
goto err;
/* Set inner content type to signed receipt */
if (!CMS_set1_eContentType(cms, OBJ_nid2obj(NID_id_smime_ct_receipt)))
goto err;
rct_si = CMS_add1_signer(cms, signcert, pkey, NULL, flags);
if (!rct_si)
{
CMSerr(CMS_F_CMS_SIGN_RECEIPT, CMS_R_ADD_SIGNER_ERROR);
goto err;
}
os = cms_encode_Receipt(si);
if (!os)
goto err;
/* Set content to digest */
rct_cont = BIO_new_mem_buf(os->data, os->length);
if (!rct_cont)
goto err;
/* Add msgSigDigest attribute */
if (!cms_msgSigDigest_add1(rct_si, si))
goto err;
/* Finalize structure */
if (!CMS_final(cms, rct_cont, NULL, flags))
goto err;
/* Set embedded content */
pos = CMS_get0_content(cms);
*pos = os;
r = 1;
err:
if (rct_cont)
BIO_free(rct_cont);
if (r)
return cms;
CMS_ContentInfo_free(cms);
return NULL;
}
CMS_ContentInfo *CMS_encrypt(STACK_OF(X509) *certs, BIO *data,
const EVP_CIPHER *cipher, unsigned int flags)
{
CMS_ContentInfo *cms;
int i;
X509 *recip;
cms = CMS_EnvelopedData_create(cipher);
if (!cms)
goto merr;
for (i = 0; i < sk_X509_num(certs); i++)
{
recip = sk_X509_value(certs, i);
if (!CMS_add1_recipient_cert(cms, recip, flags))
{
CMSerr(CMS_F_CMS_ENCRYPT, CMS_R_RECIPIENT_ERROR);
goto err;
}
}
if(!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & (CMS_STREAM|CMS_PARTIAL))
|| CMS_final(cms, data, NULL, flags))
return cms;
else
goto err;
merr:
CMSerr(CMS_F_CMS_ENCRYPT, ERR_R_MALLOC_FAILURE);
err:
if (cms)
CMS_ContentInfo_free(cms);
return NULL;
}
static int cms_kari_set1_pkey(CMS_ContentInfo *cms, CMS_RecipientInfo *ri,
EVP_PKEY *pk, X509 *cert)
{
int i;
STACK_OF(CMS_RecipientEncryptedKey) *reks;
CMS_RecipientEncryptedKey *rek;
reks = CMS_RecipientInfo_kari_get0_reks(ri);
if (!cert)
return 0;
for (i = 0; i < sk_CMS_RecipientEncryptedKey_num(reks); i++)
{
int rv;
rek = sk_CMS_RecipientEncryptedKey_value(reks, i);
if (CMS_RecipientEncryptedKey_cert_cmp(rek, cert))
continue;
CMS_RecipientInfo_kari_set0_pkey(ri, pk);
rv = CMS_RecipientInfo_kari_decrypt(cms, ri, rek);
CMS_RecipientInfo_kari_set0_pkey(ri, NULL);
if (rv > 0)
return 1;
return -1;
}
return 0;
}
int CMS_decrypt_set1_pkey(CMS_ContentInfo *cms, EVP_PKEY *pk, X509 *cert)
{
STACK_OF(CMS_RecipientInfo) *ris;
CMS_RecipientInfo *ri;
int i, r, ri_type;
int debug = 0;
ris = CMS_get0_RecipientInfos(cms);
if (ris)
debug = cms->d.envelopedData->encryptedContentInfo->debug;
ri_type = cms_pkey_get_ri_type(pk);
if (ri_type == CMS_RECIPINFO_NONE)
{
CMSerr(CMS_F_CMS_DECRYPT_SET1_PKEY,
CMS_R_NOT_SUPPORTED_FOR_THIS_KEY_TYPE);
return 0;
}
for (i = 0; i < sk_CMS_RecipientInfo_num(ris); i++)
{
ri = sk_CMS_RecipientInfo_value(ris, i);
if (CMS_RecipientInfo_type(ri) != ri_type)
continue;
if (ri_type == CMS_RECIPINFO_AGREE)
{
r = cms_kari_set1_pkey(cms, ri, pk, cert);
if (r > 0)
return 1;
if (r < 0)
return 0;
}
		/* If we have a cert, try matching RecipientInfo;
		 * otherwise try them all.
		 */
else if (!cert || !CMS_RecipientInfo_ktri_cert_cmp(ri, cert))
{
CMS_RecipientInfo_set0_pkey(ri, pk);
r = CMS_RecipientInfo_decrypt(cms, ri);
CMS_RecipientInfo_set0_pkey(ri, NULL);
if (cert)
{
/* If not debugging clear any error and
* return success to avoid leaking of
* information useful to MMA
*/
if (!debug)
{
ERR_clear_error();
return 1;
}
if (r > 0)
return 1;
CMSerr(CMS_F_CMS_DECRYPT_SET1_PKEY,
CMS_R_DECRYPT_ERROR);
return 0;
}
/* If no cert and not debugging don't leave loop
* after first successful decrypt. Always attempt
* to decrypt all recipients to avoid leaking timing
* of a successful decrypt.
*/
else if (r > 0 && debug)
return 1;
}
}
/* If no cert and not debugging always return success */
if (!cert && !debug)
{
ERR_clear_error();
return 1;
}
CMSerr(CMS_F_CMS_DECRYPT_SET1_PKEY, CMS_R_NO_MATCHING_RECIPIENT);
return 0;
}
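/* Editorial note (not part of the original source): because of the MMA
 * countermeasure above, a caller without a certificate cannot tell a
 * matching key from a non-matching one by this routine's return value;
 * a mismatch only surfaces later as a content decrypt error.  A
 * hypothetical caller therefore checks the final CMS_decrypt() result:
 *
 *	if (!CMS_decrypt(cms, pkey, NULL, NULL, out, 0))
 *		handle_decrypt_error();	(hypothetical error handler)
 */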
int CMS_decrypt_set1_key(CMS_ContentInfo *cms,
unsigned char *key, size_t keylen,
unsigned char *id, size_t idlen)
{
STACK_OF(CMS_RecipientInfo) *ris;
CMS_RecipientInfo *ri;
int i, r;
ris = CMS_get0_RecipientInfos(cms);
for (i = 0; i < sk_CMS_RecipientInfo_num(ris); i++)
{
ri = sk_CMS_RecipientInfo_value(ris, i);
if (CMS_RecipientInfo_type(ri) != CMS_RECIPINFO_KEK)
continue;
		/* If we have an id, try matching RecipientInfo;
		 * otherwise try them all.
		 */
if (!id || (CMS_RecipientInfo_kekri_id_cmp(ri, id, idlen) == 0))
{
CMS_RecipientInfo_set0_key(ri, key, keylen);
r = CMS_RecipientInfo_decrypt(cms, ri);
CMS_RecipientInfo_set0_key(ri, NULL, 0);
if (r > 0)
return 1;
if (id)
{
CMSerr(CMS_F_CMS_DECRYPT_SET1_KEY,
CMS_R_DECRYPT_ERROR);
return 0;
}
ERR_clear_error();
}
}
CMSerr(CMS_F_CMS_DECRYPT_SET1_KEY, CMS_R_NO_MATCHING_RECIPIENT);
return 0;
}
int CMS_decrypt_set1_password(CMS_ContentInfo *cms,
unsigned char *pass, ossl_ssize_t passlen)
{
STACK_OF(CMS_RecipientInfo) *ris;
CMS_RecipientInfo *ri;
int i, r;
ris = CMS_get0_RecipientInfos(cms);
for (i = 0; i < sk_CMS_RecipientInfo_num(ris); i++)
{
ri = sk_CMS_RecipientInfo_value(ris, i);
if (CMS_RecipientInfo_type(ri) != CMS_RECIPINFO_PASS)
continue;
CMS_RecipientInfo_set0_password(ri, pass, passlen);
r = CMS_RecipientInfo_decrypt(cms, ri);
CMS_RecipientInfo_set0_password(ri, NULL, 0);
if (r > 0)
return 1;
}
CMSerr(CMS_F_CMS_DECRYPT_SET1_PASSWORD, CMS_R_NO_MATCHING_RECIPIENT);
return 0;
}
int CMS_decrypt(CMS_ContentInfo *cms, EVP_PKEY *pk, X509 *cert,
BIO *dcont, BIO *out,
unsigned int flags)
{
int r;
BIO *cont;
if (OBJ_obj2nid(CMS_get0_type(cms)) != NID_pkcs7_enveloped)
{
CMSerr(CMS_F_CMS_DECRYPT, CMS_R_TYPE_NOT_ENVELOPED_DATA);
return 0;
}
if (!dcont && !check_content(cms))
return 0;
if (flags & CMS_DEBUG_DECRYPT)
cms->d.envelopedData->encryptedContentInfo->debug = 1;
else
cms->d.envelopedData->encryptedContentInfo->debug = 0;
if (!pk && !cert && !dcont && !out)
return 1;
if (pk && !CMS_decrypt_set1_pkey(cms, pk, cert))
return 0;
cont = CMS_dataInit(cms, dcont);
if (!cont)
return 0;
r = cms_copy_content(out, cont, flags);
do_free_upto(cont, dcont);
return r;
}
int CMS_final(CMS_ContentInfo *cms, BIO *data, BIO *dcont, unsigned int flags)
{
BIO *cmsbio;
int ret = 0;
if (!(cmsbio = CMS_dataInit(cms, dcont)))
{
CMSerr(CMS_F_CMS_FINAL,ERR_R_MALLOC_FAILURE);
return 0;
}
SMIME_crlf_copy(data, cmsbio, flags);
(void)BIO_flush(cmsbio);
if (!CMS_dataFinal(cms, cmsbio))
{
CMSerr(CMS_F_CMS_FINAL,CMS_R_CMS_DATAFINAL_ERROR);
goto err;
}
ret = 1;
err:
do_free_upto(cmsbio, dcont);
return ret;
}
#ifdef ZLIB
int CMS_uncompress(CMS_ContentInfo *cms, BIO *dcont, BIO *out,
unsigned int flags)
{
BIO *cont;
int r;
if (OBJ_obj2nid(CMS_get0_type(cms)) != NID_id_smime_ct_compressedData)
{
CMSerr(CMS_F_CMS_UNCOMPRESS,
CMS_R_TYPE_NOT_COMPRESSED_DATA);
return 0;
}
if (!dcont && !check_content(cms))
return 0;
cont = CMS_dataInit(cms, dcont);
if (!cont)
return 0;
r = cms_copy_content(out, cont, flags);
do_free_upto(cont, dcont);
return r;
}
CMS_ContentInfo *CMS_compress(BIO *in, int comp_nid, unsigned int flags)
{
CMS_ContentInfo *cms;
if (comp_nid <= 0)
comp_nid = NID_zlib_compression;
cms = cms_CompressedData_create(comp_nid);
if (!cms)
return NULL;
if(!(flags & CMS_DETACHED))
CMS_set_detached(cms, 0);
if ((flags & CMS_STREAM) || CMS_final(cms, in, NULL, flags))
return cms;
CMS_ContentInfo_free(cms);
return NULL;
}
#else
int CMS_uncompress(CMS_ContentInfo *cms, BIO *dcont, BIO *out,
unsigned int flags)
{
CMSerr(CMS_F_CMS_UNCOMPRESS, CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
return 0;
}
CMS_ContentInfo *CMS_compress(BIO *in, int comp_nid, unsigned int flags)
{
CMSerr(CMS_F_CMS_COMPRESS, CMS_R_UNSUPPORTED_COMPRESSION_ALGORITHM);
return NULL;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_1497_0 |
crossvul-cpp_data_good_3486_1 | /*
* ARMv6 Performance counter handling code.
*
* Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
*
* ARMv6 has 2 configurable performance counters and a single cycle counter.
* They all share a single reset bit but can be written to zero so we can use
* that for a reset.
*
* The counters can't be individually enabled or disabled so when we remove
* one event and replace it with another we could get spurious counts from the
* wrong event. However, we can take advantage of the fact that the
* performance counters can export events to the event bus, and the event bus
* itself can be monitored. This requires that we *don't* export the events to
* the event bus. The procedure for disabling a configurable counter is:
* - change the counter to count the ETMEXTOUT[0] signal (0x20). This
* effectively stops the counter from counting.
 * - disable the counter's interrupt generation (each counter has its
* own interrupt enable bit).
* Once stopped, the counter value can be written as 0 to reset.
*
* To enable a counter:
* - enable the counter's interrupt generation.
* - set the new event type.
*
* Note: the dedicated cycle counter only counts cycles and can't be
* enabled/disabled independently of the others. When we want to disable the
* cycle counter, we have to just disable the interrupt reporting and start
* ignoring that counter. When re-enabling, we have to reset the value and
* enable the interrupt.
*/
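/*
 * Editorial sketch (not part of the original source): the procedure above
 * expressed as pseudo-C.  The accessor names below are assumptions for
 * clarity only, not the helpers defined later in this file.
 *
 *	- disable a configurable counter:
 *		evtsel_write(counter, 0x20);	stop counting (ETMEXTOUT[0])
 *		inten_clear(counter);		mask its overflow interrupt
 *		counter_write(counter, 0);	value may now be reset
 *
 *	- enable it again:
 *		inten_set(counter);
 *		evtsel_write(counter, event);
 */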
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
ARMV6_PERFCTR_ICACHE_MISS = 0x0,
ARMV6_PERFCTR_IBUF_STALL = 0x1,
ARMV6_PERFCTR_DDEP_STALL = 0x2,
ARMV6_PERFCTR_ITLB_MISS = 0x3,
ARMV6_PERFCTR_DTLB_MISS = 0x4,
ARMV6_PERFCTR_BR_EXEC = 0x5,
ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
ARMV6_PERFCTR_INSTR_EXEC = 0x7,
ARMV6_PERFCTR_DCACHE_HIT = 0x9,
ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
ARMV6_PERFCTR_DCACHE_MISS = 0xB,
ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
ARMV6_PERFCTR_NOP = 0x20,
};
enum armv6_counters {
ARMV6_CYCLE_COUNTER = 1,
ARMV6_COUNTER0,
ARMV6_COUNTER1,
};
/*
 * The hardware events that we support. We do support cache operations, but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
*/
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
/*
* The performance counters don't differentiate between read
* and write accesses/misses so this isn't strictly correct,
* but it's the best we can do. Writes and reads get
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
enum armv6mpcore_perf_types {
ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};
/*
 * The hardware events that we support. We do support cache operations, but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
*/
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
[C(RESULT_MISS)] =
ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
/*
* The ARM performance counters can count micro DTLB misses,
* micro ITLB misses and main TLB misses. There isn't an event
* for TLB misses, so use the micro misses here and if users
* want the main TLB misses they can use a raw counter.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
static inline unsigned long
armv6_pmcr_read(void)
{
u32 val;
asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
return val;
}
static inline void
armv6_pmcr_write(unsigned long val)
{
asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}
#define ARMV6_PMCR_ENABLE (1 << 0)
#define ARMV6_PMCR_CTR01_RESET (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
#define ARMV6_PMCR_OVERFLOWED_MASK \
(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
ARMV6_PMCR_CCOUNT_OVERFLOW)
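/*
 * Editor's worked example (not part of the original source): with the
 * masks above, programming event 0x0 (ARMV6_PERFCTR_ICACHE_MISS) onto
 * counter 0 with its interrupt enabled amounts to
 *
 *	evt = (0x0 << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN
 *	    = 0x00000010
 *
 * which armv6pmu_enable_event() below ORs into the PMCR after first
 * clearing ARMV6_PMCR_EVT_COUNT0_MASK under pmu_lock.
 */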
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}
static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
enum armv6_counters counter)
{
int ret = 0;
if (ARMV6_CYCLE_COUNTER == counter)
ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
else if (ARMV6_COUNTER0 == counter)
ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
else if (ARMV6_COUNTER1 == counter)
ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return ret;
}
static inline u32
armv6pmu_read_counter(int counter)
{
unsigned long value = 0;
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
return value;
}
static inline void
armv6pmu_write_counter(int counter,
u32 value)
{
if (ARMV6_CYCLE_COUNTER == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
else if (ARMV6_COUNTER0 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
else if (ARMV6_COUNTER1 == counter)
asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
else
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = 0;
evt = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_EVT_COUNT0_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_EVT_COUNT1_MASK;
evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the event
* that we're interested in.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
void *dev)
{
unsigned long pmcr = armv6_pmcr_read();
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
if (!armv6_pmcr_has_overflowed(pmcr))
return IRQ_NONE;
regs = get_irq_regs();
/*
* The interrupts are cleared by writing the overflow flags back to
 * the control register. None of the other bits have any effect
 * when rewritten, so write the whole value back.
*/
armv6_pmcr_write(pmcr);
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/*
* We have a single interrupt for all counters. Check that
* each counter has overflowed before we process it.
*/
if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, &data, regs))
armpmu->disable(hwc, idx);
}
/*
* Handle the pending perf events.
*
* Note: this call *must* be run with interrupts disabled. For
* platforms that can have the PMU interrupts raised as an NMI, this
* will not work.
*/
irq_work_run();
return IRQ_HANDLED;
}
static void
armv6pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val |= ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~ARMV6_PMCR_ENABLE;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
/* Always place a cycle-count event into the cycle counter. */
if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return ARMV6_CYCLE_COUNTER;
} else {
/*
* For anything other than a cycle counter, try and use
* counter0 and counter1.
*/
if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
return ARMV6_COUNTER1;
if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
return ARMV6_COUNTER0;
/* The counters are all in use. */
return -EAGAIN;
}
}
static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, evt, flags;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
evt = 0;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Mask out the current event and set the counter to count the number
* of ETM bus signal assertion cycles. The external reporting should
* be disabled and so this should never increment.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
int idx)
{
unsigned long val, mask, flags, evt = 0;
if (ARMV6_CYCLE_COUNTER == idx) {
mask = ARMV6_PMCR_CCOUNT_IEN;
} else if (ARMV6_COUNTER0 == idx) {
mask = ARMV6_PMCR_COUNT0_IEN;
} else if (ARMV6_COUNTER1 == idx) {
mask = ARMV6_PMCR_COUNT1_IEN;
} else {
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
/*
* Unlike UP ARMv6, we don't have a way of stopping the counters. We
* simply disable the interrupt reporting.
*/
raw_spin_lock_irqsave(&pmu_lock, flags);
val = armv6_pmcr_read();
val &= ~mask;
val |= evt;
armv6_pmcr_write(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static const struct arm_pmu armv6pmu = {
.id = ARM_PERF_PMU_ID_V6,
.name = "v6",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6_perf_cache_map,
.event_map = &armv6_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6pmu_init(void)
{
return &armv6pmu;
}
/*
* ARMv6mpcore is almost identical to single core ARMv6 with the exception
* that some of the events have different enumerations and that there is no
* *hack* to stop the programmable counters. To stop the counters we simply
* disable the interrupt reporting and update the event. When unthrottling we
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
.id = ARM_PERF_PMU_ID_V6MP,
.name = "v6mpcore",
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
.read_counter = armv6pmu_read_counter,
.write_counter = armv6pmu_write_counter,
.get_event_idx = armv6pmu_get_event_idx,
.start = armv6pmu_start,
.stop = armv6pmu_stop,
.cache_map = &armv6mpcore_perf_cache_map,
.event_map = &armv6mpcore_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return &armv6mpcore_pmu;
}
#else
static const struct arm_pmu *__init armv6pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_1 |
crossvul-cpp_data_bad_5594_1 | /*
* Kernel-based Virtual Machine driver for Linux
*
* derived from drivers/kvm/kvm_main.c
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright (C) 2008 Qumranet, Inc.
* Copyright IBM Corporation, 2008
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
* Amit Shah <amit.shah@qumranet.com>
* Ben-Ami Yassour <benami@il.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
#define emul_to_vcpu(ctxt) \
container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
/* EFER defaults:
 * - enable syscall by default because it is emulated by KVM
 * - enable LME and LMA by default on 64-bit KVM
*/
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool ignore_msrs = false;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
#define KVM_NR_SHARED_MSRS 16
struct kvm_shared_msrs_global {
int nr;
u32 msrs[KVM_NR_SHARED_MSRS];
};
struct kvm_shared_msrs {
struct user_return_notifier urn;
bool registered;
struct kvm_shared_msr_values {
u64 host;
u64 curr;
} values[KVM_NR_SHARED_MSRS];
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
{ "pf_guest", VCPU_STAT(pf_guest) },
{ "tlb_flush", VCPU_STAT(tlb_flush) },
{ "invlpg", VCPU_STAT(invlpg) },
{ "exits", VCPU_STAT(exits) },
{ "io_exits", VCPU_STAT(io_exits) },
{ "mmio_exits", VCPU_STAT(mmio_exits) },
{ "signal_exits", VCPU_STAT(signal_exits) },
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "nmi_window", VCPU_STAT(nmi_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
{ "efer_reload", VCPU_STAT(efer_reload) },
{ "fpu_reload", VCPU_STAT(fpu_reload) },
{ "insn_emulation", VCPU_STAT(insn_emulation) },
{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
{ "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
{ "mmu_flooded", VM_STAT(mmu_flooded) },
{ "mmu_recycled", VM_STAT(mmu_recycled) },
{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages) },
{ NULL }
};
u64 __read_mostly host_xcr0;
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
int i;
for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
vcpu->arch.apf.gfns[i] = ~0;
}
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
wrmsrl(shared_msrs_global.msrs[slot], values->host);
values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
{
u64 value;
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
/* This is only read, and nobody should modify it at this time,
 * so no lock is needed */
if (slot >= shared_msrs_global.nr) {
printk(KERN_ERR "kvm: invalid MSR slot!");
return;
}
rdmsrl_safe(msr, &value);
smsr->values[slot].host = value;
smsr->values[slot].curr = value;
}
void kvm_define_shared_msr(unsigned slot, u32 msr)
{
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
shared_msrs_global.msrs[slot] = msr;
/* make sure shared_msrs_global has been updated before it is read */
smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
unsigned i;
for (i = 0; i < shared_msrs_global.nr; ++i)
shared_msr_update(i, shared_msrs_global.msrs[i]);
}
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
smsr->values[slot].curr = value;
wrmsrl(shared_msrs_global.msrs[slot], value);
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
smsr->registered = true;
}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
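/*
 * Editor's usage sketch (not part of the original source; the slot
 * number and value are hypothetical): a vendor module typically
 * defines its shared MSR slots once at init time and then switches
 * them lazily when loading guest state:
 *
 *	kvm_define_shared_msr(0, MSR_STAR);		(module init)
 *	kvm_set_shared_msr(0, guest_star, -1ull);	(guest entry)
 *
 * The host value is restored only when the CPU next returns to
 * userspace, via the user-return notifier registered above.
 */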
static void drop_user_return_notifiers(void *ignore)
{
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (smsr->registered)
kvm_on_user_return(&smsr->urn);
}
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
/* TODO: reserve bits check */
kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
#define EXCPT_BENIGN 0
#define EXCPT_CONTRIBUTORY 1
#define EXCPT_PF 2
static int exception_class(int vector)
{
switch (vector) {
case PF_VECTOR:
return EXCPT_PF;
case DE_VECTOR:
case TS_VECTOR:
case NP_VECTOR:
case SS_VECTOR:
case GP_VECTOR:
return EXCPT_CONTRIBUTORY;
default:
break;
}
return EXCPT_BENIGN;
}
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool reinject)
{
u32 prev_nr;
int class1, class2;
kvm_make_request(KVM_REQ_EVENT, vcpu);
if (!vcpu->arch.exception.pending) {
queue:
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
vcpu->arch.exception.reinject = reinject;
return;
}
/* to check exception */
prev_nr = vcpu->arch.exception.nr;
if (prev_nr == DF_VECTOR) {
/* triple fault -> shutdown */
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
class1 = exception_class(prev_nr);
class2 = exception_class(nr);
if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
/* generate double fault per SDM Table 5-5 */
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = true;
vcpu->arch.exception.nr = DF_VECTOR;
vcpu->arch.exception.error_code = 0;
} else
/* replace the previous exception with a new one in the hope
that instruction re-execution will regenerate the lost
exception */
goto queue;
}
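/*
 * Editor's worked example (not part of the original source): if a #GP
 * (EXCPT_CONTRIBUTORY) is raised while a #PF (EXCPT_PF) is still
 * pending, the classification above merges the two into a #DF with
 * error code 0; if a #DF is already pending, any further exception
 * escalates to a KVM_REQ_TRIPLE_FAULT shutdown instead.
 */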
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
kvm_inject_gp(vcpu, 0);
else
kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
vcpu->arch.cr2 = fault->address;
kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
atomic_inc(&vcpu->arch.nmi_queued);
kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
* a #GP and return false.
*/
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
return true;
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
* This function will be used to read from the physical memory of the currently
* running guest. The difference to kvm_read_guest_page is that this function
* can read from guest physical or from the guest's guest physical memory.
*/
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t ngfn, void *data, int offset, int len,
u32 access)
{
gfn_t real_gfn;
gpa_t ngpa;
ngpa = gfn_to_gpa(ngfn);
real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
if (real_gfn == UNMAPPED_GVA)
return -EFAULT;
real_gfn = gpa_to_gfn(real_gfn);
return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
void *data, int offset, int len, u32 access)
{
return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
data, offset, len, access);
}
/*
 * Load the PAE pdptrs. Return true if they are all valid.
*/
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
int i;
int ret;
u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte),
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (ret < 0) {
ret = 0;
goto out;
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if (is_present_gpte(pdpte[i]) &&
(pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
}
}
ret = 1;
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail);
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty);
out:
return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
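/*
 * Editor's worked example (not part of the original source): for a
 * hypothetical PAE cr3 of 0x12345020, load_pdptrs() reads from
 * pdpt_gfn = 0x12345 at
 *
 *	offset = ((0x020 & (PAGE_SIZE-1)) >> 5) << 2 = 4	(u64 units)
 *
 * i.e. byte offset 32 into the page, covering the four 8-byte PDPTEs
 * stored at the 32-byte-aligned table base.
 */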
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
bool changed = true;
int offset;
gfn_t gfn;
int r;
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail))
return true;
gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:
return changed;
}
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
X86_CR0_CD | X86_CR0_NW;
cr0 |= X86_CR0_ET;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return 1;
#endif
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return 1;
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return 1;
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME)) {
int cs_db, cs_l;
if (!is_pae(vcpu))
return 1;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
}
if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
return 1;
kvm_x86_ops->set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0;
/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
if (index != XCR_XFEATURE_ENABLED_MASK)
return 1;
xcr0 = xcr;
if (kvm_x86_ops->get_cpl(vcpu) != 0)
return 1;
if (!(xcr0 & XSTATE_FP))
return 1;
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
return 1;
if (xcr0 & ~host_xcr0)
return 1;
vcpu->arch.xcr0 = xcr0;
vcpu->guest_xcr0_loaded = 0;
return 0;
}
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
if (__kvm_set_xcr(vcpu, index, xcr)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
return 1;
if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
return 1;
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
if (!guest_cpuid_has_pcid(vcpu))
return 1;
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
return 1;
}
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
if (((cr4 ^ old_cr4) & pdptr_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_mmu_flush_tlb(vcpu);
return 0;
}
if (is_long_mode(vcpu)) {
if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
if (cr3 & CR3_L_MODE_RESERVED_BITS)
return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
return 1;
if (is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
}
/*
 * We don't check reserved bits in non-PAE mode, because
* this isn't enforced, and VMware depends on this.
*/
}
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
* cause trouble later on when we turn on paging anyway.)
*
* A real CPU would silently accept an invalid cr3 and would
* attempt to use it - with largely undefined (and often hard
* to debug) behavior on the guest side.
*/
if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
return 1;
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu->arch.mmu.new_cr3(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS)
return 1;
if (irqchip_in_kernel(vcpu->kvm))
kvm_lapic_set_tpr(vcpu, cr8);
else
vcpu->arch.cr8 = cr8;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
return kvm_lapic_get_cr8(vcpu);
else
return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
unsigned long dr7;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
dr7 = vcpu->arch.guest_debug_dr7;
else
dr7 = vcpu->arch.dr7;
kvm_x86_ops->set_dr7(vcpu, dr7);
vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
switch (dr) {
case 0 ... 3:
vcpu->arch.db[dr] = val;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;
case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */
case 6:
if (val & 0xffffffff00000000ULL)
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */
default: /* 7 */
if (val & 0xffffffff00000000ULL)
return -1; /* #GP */
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
kvm_update_dr7(vcpu);
break;
}
return 0;
}
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
int res;
res = __kvm_set_dr(vcpu, dr, val);
if (res > 0)
kvm_queue_exception(vcpu, UD_VECTOR);
else if (res < 0)
kvm_inject_gp(vcpu, 0);
return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
break;
case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
}
return 0;
}
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
if (_kvm_get_dr(vcpu, dr, val)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
u64 data;
int err;
err = kvm_pmu_read_pmc(vcpu, ecx, &data);
if (err)
return err;
kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
*
* This list is modified at module load time to reflect the
* capabilities of the host cpu. This capabilities test skips MSRs that are
* kvm-specific. Those are put in the beginning of the list.
*/
#define KVM_SAVE_MSRS_BEGIN 10
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};
static unsigned num_msrs_to_save;
static const u32 emulated_msrs[] = {
MSR_IA32_TSC_ADJUST,
MSR_IA32_TSCDEADLINE,
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
};
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
void kvm_enable_efer_bits(u64 mask)
{
efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
 * Writes the msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
return kvm_x86_ops->set_msr(vcpu, msr);
}
/*
* Adapt set_msr() to msr_io()'s calling convention
*/
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
struct msr_data msr;
msr.data = *data;
msr.index = index;
msr.host_initiated = true;
return kvm_set_msr(vcpu, &msr);
}
#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
seqcount_t seq;
struct { /* extract of a clocksource struct */
int vclock_mode;
cycle_t cycle_last;
cycle_t mask;
u32 mult;
u32 shift;
} clock;
/* open coded 'struct timespec' */
u64 monotonic_time_snsec;
time_t monotonic_time_sec;
};
static struct pvclock_gtod_data pvclock_gtod_data;
static void update_pvclock_gtod(struct timekeeper *tk)
{
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
vdata->clock.cycle_last = tk->clock->cycle_last;
vdata->clock.mask = tk->clock->mask;
vdata->clock.mult = tk->mult;
vdata->clock.shift = tk->shift;
vdata->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->xtime_nsec
+ (tk->wall_to_monotonic.tv_nsec
<< tk->shift);
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->shift)) {
vdata->monotonic_time_snsec -=
((u64)NSEC_PER_SEC) << tk->shift;
vdata->monotonic_time_sec++;
}
write_seqcount_end(&vdata->seq);
}
#endif
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
int version;
int r;
struct pvclock_wall_clock wc;
struct timespec boot;
if (!wall_clock)
return;
r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
if (r)
return;
if (version & 1)
++version; /* first time write, random junk */
++version;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
/*
* The guest calculates current wall clock time by adding
* system time (updated by kvm_guest_time_update below) to the
* wall clock specified here. guest system time equals host
* system time for us, thus we must fill in host boot time here.
*/
getboottime(&boot);
if (kvm->arch.kvmclock_offset) {
struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
boot = timespec_sub(boot, ts);
}
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
version++;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
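/*
 * Editor's sketch of the guest-side counterpart (hypothetical, not
 * part of this file): the odd/even version dance above is effectively
 * a seqlock. A guest reader retries until it observes the same even
 * version on both sides of the copy:
 *
 *	do {
 *		version = wc->version;
 *		rmb();
 *		sec = wc->sec;
 *		nsec = wc->nsec;
 *		rmb();
 *	} while ((version & 1) || version != wc->version);
 */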
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
uint32_t quotient, remainder;
/* Don't try to replace this with do_div(); it calculates
* "(dividend << 32) / divisor" */
__asm__ ( "divl %4"
: "=a" (quotient), "=d" (remainder)
: "0" (0), "1" (dividend), "r" (divisor) );
return quotient;
}
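/*
 * Editor's sketch (not part of the original source): a portable
 * equivalent of div_frac() for reference; like the divl above, it
 * assumes dividend < divisor so the quotient fits in 32 bits.
 */
#if 0
static uint32_t div_frac_portable(uint32_t dividend, uint32_t divisor)
{
	return (uint32_t)(((uint64_t)dividend << 32) / divisor);
}
#endif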
static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
s8 *pshift, u32 *pmultiplier)
{
uint64_t scaled64;
int32_t shift = 0;
uint64_t tps64;
uint32_t tps32;
tps64 = base_khz * 1000LL;
scaled64 = scaled_khz * 1000LL;
while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
tps64 >>= 1;
shift--;
}
tps32 = (uint32_t)tps64;
while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
scaled64 >>= 1;
else
tps32 <<= 1;
shift++;
}
*pshift = shift;
*pmultiplier = div_frac(scaled64, tps32);
pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
__func__, base_khz, scaled_khz, shift, *pmultiplier);
}
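/*
 * Editor's worked example (not part of the original source): for a
 * 2 GHz TSC scaled to nanoseconds, i.e. base_khz = 2000000 and
 * scaled_khz = 1000000, neither loop shifts, so
 *
 *	shift = 0, mult = (1000000000ULL << 32) / 2000000000 = 0x80000000
 *
 * and pvclock_scale_delta() then yields (cycles * mult) >> 32 =
 * cycles / 2, i.e. 0.5 ns per cycle as expected.
 */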
static inline u64 get_kernel_ns(void)
{
struct timespec ts;
WARN_ON(preemptible());
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
return timespec_to_ns(&ts);
}
#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
vcpu->arch.virtual_tsc_shift);
}
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
u64 v = (u64)khz * (1000000 + ppm);
do_div(v, 1000000);
return v;
}
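/*
 * Editor's worked example (not part of the original source): with the
 * default tsc_tolerance_ppm of 250, a 100000 kHz host TSC yields
 *
 *	adjust_tsc_khz(100000, -250) = 99975
 *	adjust_tsc_khz(100000, 250) = 100025
 *
 * which kvm_set_tsc_khz() below uses as the [thresh_lo, thresh_hi]
 * window when deciding whether TSC scaling is required.
 */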
static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
u32 thresh_lo, thresh_hi;
int use_scaling = 0;
/* Compute a scale to convert nanoseconds in TSC cycles */
kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
&vcpu->arch.virtual_tsc_shift,
&vcpu->arch.virtual_tsc_mult);
vcpu->arch.virtual_tsc_khz = this_tsc_khz;
/*
* Compute the variation in TSC rate which is acceptable
* within the range of tolerance and decide if the
* rate being applied is within that bounds of the hardware
* rate. If so, no scaling or compensation need be done.
*/
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
use_scaling = 1;
}
kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
vcpu->arch.virtual_tsc_mult,
vcpu->arch.virtual_tsc_shift);
tsc += vcpu->arch.this_tsc_write;
return tsc;
}
void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
bool do_request = false;
struct kvm_arch *ka = &vcpu->kvm->arch;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&vcpu->kvm->online_vcpus));
if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
if (!ka->use_master_clock)
do_request = true;
if (!vcpus_matched && ka->use_master_clock)
do_request = true;
if (do_request)
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
atomic_read(&vcpu->kvm->online_vcpus),
ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
s64 usdiff;
bool matched;
u64 data = msr->data;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
/* n.b - signed multiplication and division required */
usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
/* do_div() only does unsigned */
asm("idivl %2; xor %%edx, %%edx"
: "=A"(usdiff)
: "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
#endif
do_div(elapsed, 1000);
usdiff -= elapsed;
if (usdiff < 0)
usdiff = -usdiff;
/*
* Special case: TSC write with a small delta (1 second) of virtual
* cycle time against real time is interpreted as an attempt to
* synchronize the CPU.
*
* For a reliable TSC, we can match TSC offsets, and for an unstable
* TSC, we add elapsed time in this computation. We could let the
* compensation code attempt to catch up if we fall behind, but
* it's better to try to match offsets from the beginning.
*/
if (usdiff < USEC_PER_SEC &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
if (!check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
matched = true;
} else {
/*
* We split periods of matched TSC writes into generations.
* For each generation, we track the original measured
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
* exact software computation in compute_guest_tsc()
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
kvm->arch.cur_tsc_generation++;
kvm->arch.cur_tsc_nsec = ns;
kvm->arch.cur_tsc_write = data;
kvm->arch.cur_tsc_offset = offset;
matched = false;
pr_debug("kvm: new tsc generation %u, clock %llu\n",
kvm->arch.cur_tsc_generation, data);
}
/*
 * We also track the most recent recorded kHz, write and time to
* allow the matching interval to be extended at each write.
*/
kvm->arch.last_tsc_nsec = ns;
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
vcpu->arch.last_guest_tsc = data;
/* Keep track of which generation this VCPU has synchronized to */
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
if (matched)
kvm->arch.nr_vcpus_matched_tsc++;
else
kvm->arch.nr_vcpus_matched_tsc = 0;
kvm_track_tsc_matching(vcpu);
spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);
#ifdef CONFIG_X86_64
static cycle_t read_tsc(void)
{
cycle_t ret;
u64 last;
/*
* Empirically, a fence (of type that depends on the CPU)
* before rdtsc is enough to ensure that rdtsc is ordered
* with respect to loads. The various CPU manuals are unclear
* as to whether rdtsc can be reordered with later loads,
* but no one has ever seen it happen.
*/
rdtsc_barrier();
ret = (cycle_t)vget_cycles();
last = pvclock_gtod_data.clock.cycle_last;
if (likely(ret >= last))
return ret;
/*
* GCC likes to generate cmov here, but this branch is extremely
 * predictable (it's just a function of time and the likely is
* very likely) and there's a data dependence, so force GCC
* to generate a branch instead. I don't barrier() because
* we don't actually need a barrier, and if this function
* ever gets inlined it will generate worse code.
*/
asm volatile ("");
return last;
}
static inline u64 vgettsc(cycle_t *cycle_now)
{
long v;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
*cycle_now = read_tsc();
v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
return v * gtod->clock.mult;
}
static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
{
unsigned long seq;
u64 ns;
int mode;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
ts->tv_nsec = 0;
do {
seq = read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
ns += vgettsc(cycle_now);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
timespec_add_ns(ts, ns);
return mode;
}
/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
struct timespec ts;
/* checked again under seqlock below */
if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
return false;
if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
return false;
monotonic_to_bootbased(&ts);
*kernel_ns = timespec_to_ns(&ts);
return true;
}
#endif
/*
*
* Assuming a stable TSC across physical CPUS, and a stable TSC
* across virtual CPUs, the following condition is possible.
* Each numbered line represents an event visible to both
* CPUs at the next numbered event.
*
* "timespecX" represents host monotonic time. "tscX" represents
* RDTSC value.
*
* VCPU0 on CPU0 | VCPU1 on CPU1
*
* 1. read timespec0,tsc0
* 2. | timespec1 = timespec0 + N
* | tsc1 = tsc0 + M
* 3. transition to guest | transition to guest
* 4. ret0 = timespec0 + (rdtsc - tsc0) |
* 5. | ret1 = timespec1 + (rdtsc - tsc1)
* | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
*
* Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
*
* - ret0 < ret1
* - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
* ...
* - 0 < N - M => M < N
*
* That is, when timespec0 != timespec1, M < N. Unfortunately that is not
* always the case (the difference between two distinct xtime instances
 * might be smaller than the difference between corresponding TSC reads,
* when updating guest vcpus pvclock areas).
*
* To avoid that problem, do not allow visibility of distinct
* system_timestamp/tsc_timestamp values simultaneously: use a master
* copy of host monotonic time values. Update that master copy
* in lockstep.
*
* Rely on synchronization of host TSCs and guest TSCs for monotonicity.
*
*/
static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
struct kvm_arch *ka = &kvm->arch;
int vclock_mode;
bool host_tsc_clocksource, vcpus_matched;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&kvm->online_vcpus));
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
host_tsc_clocksource = kvm_get_time_and_clockread(
&ka->master_kernel_ns,
&ka->master_cycle_now);
ka->use_master_clock = host_tsc_clocksource & vcpus_matched;
if (ka->use_master_clock)
atomic_set(&kvm_guest_has_master_clock, 1);
vclock_mode = pvclock_gtod_data.clock.vclock_mode;
trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
vcpus_matched);
#endif
}
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info *guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->time_page)
return 0;
/*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
 * possible measurements of the TSC correspond to one measurement of any
* other clock, and so a spread of values is possible. This is not a
* problem for the computation of the nanosecond clock; with TSC rates
* around 1GHZ, there can only be a few cycles which correspond to one
* nanosecond value, and any path through this code will inevitably
* take longer than that. However, with the kernel_ns value itself,
* the precision may be much lower, down to HZ granularity. If the
* first sampling of TSC against kernel_ns ends in the low part of the
* range, and the second in the high end of the range, we can get:
*
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
*
* As the sampling errors potentially range in the thousands of cycles,
* it is possible such a time value has already been observed by the
* guest. To protect against this, we must compute the system time as
* observed by the guest and ensure the new system time is greater.
*/
max_kernel_ns = 0;
if (vcpu->hv_clock.tsc_timestamp) {
max_kernel_ns = vcpu->last_guest_tsc -
vcpu->hv_clock.tsc_timestamp;
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
vcpu->hv_clock.tsc_to_system_mul,
vcpu->hv_clock.tsc_shift);
max_kernel_ns += vcpu->last_kernel_ns;
}
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/* with a master <monotonic time, tsc value> tuple,
* pvclock clock reads always increase at the (scaled) rate
* of guest TSC - no need to deal with sampling errors.
*/
if (!use_master_clock) {
if (max_kernel_ns > kernel_ns)
kernel_ns = max_kernel_ns;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
shared_kaddr = kmap_atomic(vcpu->time_page);
guest_hv_clock = shared_kaddr + vcpu->time_offset;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
kunmap_atomic(shared_kaddr);
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
return 0;
}
static bool msr_mtrr_valid(unsigned msr)
{
switch (msr) {
case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
case MSR_IA32_CR_PAT:
return true;
case 0x2f8:
return true;
}
return false;
}
static bool valid_pat_type(unsigned t)
{
return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}
static bool valid_mtrr_type(unsigned t)
{
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
int i;
if (!msr_mtrr_valid(msr))
return false;
if (msr == MSR_IA32_CR_PAT) {
for (i = 0; i < 8; i++)
if (!valid_pat_type((data >> (i * 8)) & 0xff))
return false;
return true;
} else if (msr == MSR_MTRRdefType) {
if (data & ~0xcff)
return false;
return valid_mtrr_type(data & 0xff);
} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
for (i = 0; i < 8 ; i++)
if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
return false;
return true;
}
/* variable MTRRs */
return valid_mtrr_type(data & 0xff);
}
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
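/*
* Variable-range MTRRs come in base/mask MSR pairs starting at
* 0x200: 0x200/0x201 for range 0, 0x202/0x203 for range 1, and
* so on; e.g. MSR 0x205 decodes to idx 2 with is_mtrr_mask set.
*/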
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
switch (msr) {
case MSR_IA32_MCG_STATUS:
vcpu->arch.mcg_status = data;
break;
case MSR_IA32_MCG_CTL:
if (!(mcg_cap & MCG_CTL_P))
return 1;
if (data != 0 && data != ~(u64)0)
return -1;
vcpu->arch.mcg_ctl = data;
break;
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
/* Only 0 or all 1s can be written to IA32_MCi_CTL.
* Some Linux kernels, though, clear bit 10 in bank 4 to
* work around a BIOS/GART TBL issue on AMD K8s; ignore
* that bit here to avoid an uncaught #GP in the guest.
*/
if ((offset & 0x3) == 0 &&
data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
}
return 1;
}
return 0;
}
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
u64 page_addr = data & PAGE_MASK;
u8 *page;
int r;
r = -E2BIG;
if (page_num >= blob_size)
goto out;
r = -ENOMEM;
page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
if (IS_ERR(page)) {
r = PTR_ERR(page);
goto out;
}
if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
goto out_free;
r = 0;
out_free:
kfree(page);
out:
return r;
}
static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
bool r = false;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
case HV_X64_MSR_HYPERCALL:
r = true;
break;
}
return r;
}
static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if guest os id is not set hypercall should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
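/*
* The upper bits of the MSR carry the GFN of the hypercall page;
* patch in the vendor-appropriate VMCALL/VMMCALL sequence and
* terminate it with a ret.
*/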
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
}
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case HV_X64_MSR_APIC_ASSIST_PAGE: {
unsigned long addr;
if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
vcpu->arch.hv_vapic = data;
break;
}
addr = gfn_to_hva(vcpu->kvm, data >>
HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
if (kvm_is_error_hva(addr))
return 1;
if (__clear_user((void __user *)addr, PAGE_SIZE))
return 1;
vcpu->arch.hv_vapic = data;
break;
}
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
case HV_X64_MSR_ICR:
return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
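/*
* Bit 0 (KVM_ASYNC_PF_ENABLED) turns the feature on, bit 1
* (KVM_ASYNC_PF_SEND_ALWAYS) requests notifications even while
* the guest runs in kernel mode, and the remaining bits hold the
* 64-byte aligned GPA of the shared data area.
*/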
gpa_t gpa = data & ~0x3f;
/* Bits 2:5 are reserved and should be zero */
if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
if (!(data & KVM_ASYNC_PF_ENABLED)) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 0;
}
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.time_page) {
kvm_release_page_dirty(vcpu->arch.time_page);
vcpu->arch.time_page = NULL;
}
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
u64 delta;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu->arch.st.accum_steal = delta;
}
static void record_steal_time(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
vcpu->arch.st.steal.version += 2;
vcpu->arch.st.accum_steal = 0;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* we verify if the enable bit is set... */
if (!(data & 1))
break;
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
if (vcpu->arch.time_offset &
(sizeof(struct pvclock_vcpu_time_info) - 1))
break;
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
if (is_error_page(vcpu->arch.time_page))
vcpu->arch.time_page = NULL;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should at least be
* reported, since some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
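/* fall through */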
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no-longer-documented MSR.
* Writes are only relevant for old K7 processors, all
* pre-dating SVM, but remain a recommended workaround from
* AMD for those chips. Since the affected processor models
* can be forced on the command line, the workaround writes
* may still arrive here and need to be ignored.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
break;
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!msr_mtrr_valid(msr))
return 1;
if (msr == MSR_MTRRdefType)
*pdata = vcpu->arch.mtrr_state.def_type +
(vcpu->arch.mtrr_state.enabled << 10);
else if (msr == MSR_MTRRfix64K_00000)
*pdata = p[0];
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
else if (msr == MSR_IA32_CR_PAT)
*pdata = vcpu->arch.pat;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pdata = *pt;
}
return 0;
}
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
switch (msr) {
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
data = 0;
break;
case MSR_IA32_MCG_CAP:
data = vcpu->arch.mcg_cap;
break;
case MSR_IA32_MCG_CTL:
if (!(mcg_cap & MCG_CTL_P))
return 1;
data = vcpu->arch.mcg_ctl;
break;
case MSR_IA32_MCG_STATUS:
data = vcpu->arch.mcg_status;
break;
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
data = vcpu->arch.mce_banks[offset];
break;
}
return 1;
}
*pdata = data;
return 0;
}
static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data = 0;
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
data = kvm->arch.hv_guest_os_id;
break;
case HV_X64_MSR_HYPERCALL:
data = kvm->arch.hv_hypercall;
break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
return 0;
}
static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data = 0;
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
int r;
struct kvm_vcpu *v;
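/* The Hyper-V VP index is this vcpu's position in the vcpu list. */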
kvm_for_each_vcpu(r, v, vcpu->kvm)
if (v == vcpu)
data = r;
break;
}
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
case HV_X64_MSR_ICR:
return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
case HV_X64_MSR_APIC_ASSIST_PAGE:
data = vcpu->arch.hv_vapic;
break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
return 0;
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
switch (msr) {
case MSR_IA32_PLATFORM_ID:
case MSR_IA32_EBL_CR_POWERON:
case MSR_IA32_DEBUGCTLMSR:
case MSR_IA32_LASTBRANCHFROMIP:
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K7_EVNTSEL0:
case MSR_K7_PERFCTR0:
case MSR_K8_INT_PENDING_MSG:
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
data = 0;
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
data = 0;
break;
case MSR_IA32_UCODE_REV:
data = 0x100000000ULL;
break;
case MSR_MTRRcap:
data = 0x500 | KVM_NR_VAR_MTRR;
break;
case 0x200 ... 0x2ff:
return get_msr_mtrr(vcpu, msr, pdata);
case 0xcd: /* fsb frequency */
data = 3;
break;
/*
* MSR_EBC_FREQUENCY_ID
* Conservative value valid for even the basic CPU models.
* Models 0 and 1: 000 in bits 23:21 indicates a bus speed of
* 100MHz; model 2: 000 in bits 18:16 indicates 100MHz; models
* 3 and 4: 266MHz. Set the Core Clock Frequency to System Bus
* Frequency Ratio (bits 31:24) to 1 even though it is only
* valid for CPU models > 2, since guests may otherwise end up
* dividing or multiplying by zero.
*/
case MSR_EBC_FREQUENCY_ID:
data = 1 << 24;
break;
case MSR_IA32_APICBASE:
data = kvm_get_apic_base(vcpu);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_read(vcpu, msr, pdata);
case MSR_IA32_TSCDEADLINE:
data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
case MSR_IA32_TSC_ADJUST:
data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
break;
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */
data = 1000ULL;
/* CPU multiplier */
data |= (((uint64_t)4ULL) << 40);
break;
case MSR_EFER:
data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
case MSR_KVM_WALL_CLOCK_NEW:
data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
case MSR_KVM_SYSTEM_TIME_NEW:
data = vcpu->arch.time;
break;
case MSR_KVM_ASYNC_PF_EN:
data = vcpu->arch.apf.msr_val;
break;
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
case MSR_KVM_PV_EOI_EN:
data = vcpu->arch.pv_eoi.msr_val;
break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return get_msr_mce(vcpu, msr, pdata);
case MSR_K7_CLK_CTL:
/*
* Provide the expected ramp-up count for K7. All other fields
* are set to zero, indicating minimum divisors for every field.
*
* This prevents guest kernels on an AMD host with CPU type 6,
* model 8 and higher from exploding due to the rdmsr failing.
*/
data = 0x20000000;
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = get_msr_hyperv_pw(vcpu, msr, pdata);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return get_msr_hyperv(vcpu, msr, pdata);
break;
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
* silicon. It is however accessed by winxp in very narrow
* scenarios where it sets bit #19, itself documented as
* a "reserved" bit. Best effort attempt to source coherent
* read data here should the balance of the register be
* interpreted by the guest:
*
* L2 cache control register 3: 64GB range, 256KB size,
* enabled, latency 0x1, configured
*/
data = 0xbe702111;
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
data = vcpu->arch.osvw.length;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
data = vcpu->arch.osvw.status;
break;
default:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
data = 0;
}
break;
}
*pdata = data;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
* Read or write a bunch of msrs. All parameters are kernel addresses.
*
* @return number of msrs set successfully.
*/
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
int i, idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return i;
}
/*
* Read or write a bunch of msrs. Parameters are user addresses.
*
* @return number of msrs set successfully.
*/
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data),
int writeback)
{
struct kvm_msrs msrs;
struct kvm_msr_entry *entries;
int r, n;
unsigned size;
r = -EFAULT;
if (copy_from_user(&msrs, user_msrs, sizeof msrs))
goto out;
r = -E2BIG;
if (msrs.nmsrs >= MAX_IO_MSRS)
goto out;
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
entries = memdup_user(user_msrs->entries, size);
if (IS_ERR(entries)) {
r = PTR_ERR(entries);
goto out;
}
r = n = __msr_io(vcpu, &msrs, entries, do_msr);
if (r < 0)
goto out_free;
r = -EFAULT;
if (writeback && copy_to_user(user_msrs->entries, entries, size))
goto out_free;
r = n;
out_free:
kfree(entries);
out:
return r;
}
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
long r;
switch (ioctl) {
case KVM_GET_MSR_INDEX_LIST: {
struct kvm_msr_list __user *user_msr_list = argp;
struct kvm_msr_list msr_list;
unsigned n;
r = -EFAULT;
if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
goto out;
n = msr_list.nmsrs;
msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
goto out;
r = -E2BIG;
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
&emulated_msrs,
ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
goto out;
r = 0;
break;
}
case KVM_GET_SUPPORTED_CPUID: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
cpuid_arg->entries);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
goto out;
r = 0;
break;
}
case KVM_X86_GET_MCE_CAP_SUPPORTED: {
u64 mce_cap;
mce_cap = KVM_MCE_CAP_SUPPORTED;
r = -EFAULT;
if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
goto out;
r = 0;
break;
}
default:
r = -EINVAL;
}
out:
return r;
}
static void wbinvd_ipi(void *garbage)
{
wbinvd();
}
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.iommu_domain &&
!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Account for the possibility that WBINVD is executed by the guest */
if (need_emulate_wbinvd(vcpu)) {
if (kvm_x86_ops->has_wbinvd_exit())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
kvm_x86_ops->vcpu_load(vcpu, cpu);
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
vcpu->arch.tsc_offset_adjustment = 0;
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
}
if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
*/
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
accumulate_steal_time(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = native_read_tsc();
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
return 0;
}
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
kvm_apic_post_state_restore(vcpu, s);
update_cr8_intercept(vcpu);
return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
kvm_queue_interrupt(vcpu, irq->irq, false);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
kvm_inject_nmi(vcpu);
return 0;
}
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac)
{
if (tac->flags)
return -EINVAL;
vcpu->arch.tpr_access_reporting = !!tac->enabled;
return 0;
}
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
u64 mcg_cap)
{
int r;
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
goto out;
r = 0;
vcpu->arch.mcg_cap = mcg_cap;
/* Init IA32_MCG_CTL to all 1s */
if (mcg_cap & MCG_CTL_P)
vcpu->arch.mcg_ctl = ~(u64)0;
/* Init IA32_MCi_CTL to all 1s */
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
return r;
}
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
struct kvm_x86_mce *mce)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
u64 *banks = vcpu->arch.mce_banks;
if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
return -EINVAL;
/*
* if IA32_MCG_CTL is not all 1s, the uncorrected error
* reporting is disabled
*/
if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
vcpu->arch.mcg_ctl != ~(u64)0)
return 0;
banks += 4 * mce->bank;
/*
* if IA32_MCi_CTL is not all 1s, the uncorrected error
* reporting is disabled for the bank
*/
if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 0;
}
if (banks[1] & MCI_STATUS_VAL)
mce->status |= MCI_STATUS_OVER;
banks[2] = mce->addr;
banks[3] = mce->misc;
vcpu->arch.mcg_status = mce->mcg_status;
banks[1] = mce->status;
kvm_queue_exception(vcpu, MC_VECTOR);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL)
mce->status |= MCI_STATUS_OVER;
banks[2] = mce->addr;
banks[3] = mce->misc;
banks[1] = mce->status;
} else
banks[1] |= MCI_STATUS_OVER;
return 0;
}
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
events->exception.injected =
vcpu->arch.exception.pending &&
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
events->interrupt.soft = 0;
events->interrupt.shadow =
kvm_x86_ops->get_interrupt_shadow(vcpu,
KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending != 0;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
events->nmi.pad = 0;
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
memset(&events->reserved, 0, sizeof(events->reserved));
}
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW))
return -EINVAL;
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu->arch.exception.error_code = events->exception.error_code;
vcpu->arch.interrupt.pending = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
kvm_x86_ops->set_interrupt_shadow(vcpu,
events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
vcpu->arch.nmi_pending = events->nmi.pending;
kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
vcpu->arch.sipi_vector = events->sipi_vector;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
if (dbgregs->flags)
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
vcpu->arch.dr7 = dbgregs->dr7;
return 0;
}
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (cpu_has_xsave)
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->xsave,
xstate_size);
else {
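/*
* Without XSAVE support, synthesize a legacy FXSAVE image and
* mark only FP/SSE state as present in the xstate header.
*/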
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
sizeof(struct i387_fxsave_struct));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XSTATE_FPSSE;
}
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
if (cpu_has_xsave)
memcpy(&vcpu->arch.guest_fpu.state->xsave,
guest_xsave->region, xstate_size);
else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state->fxsave,
guest_xsave->region, sizeof(struct i387_fxsave_struct));
}
return 0;
}
static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{
if (!cpu_has_xsave) {
guest_xcrs->nr_xcrs = 0;
return;
}
guest_xcrs->nr_xcrs = 1;
guest_xcrs->flags = 0;
guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{
int i, r = 0;
if (!cpu_has_xsave)
return -EINVAL;
if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
return -EINVAL;
for (i = 0; i < guest_xcrs->nr_xcrs; i++)
/* Only support XCR0 currently */
if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
guest_xcrs->xcrs[0].value);
break;
}
if (r)
r = -EINVAL;
return r;
}
/*
* kvm_set_guest_paused() indicates to the guest kernel that it has been
* stopped by the hypervisor. This function will be called from the host only.
* -EINVAL is returned when the host attempts to set the flag for a guest that
* does not support pv clocks.
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
if (!vcpu->arch.time_page)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
union {
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
void *buffer;
} u;
u.buffer = NULL;
switch (ioctl) {
case KVM_GET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
r = -ENOMEM;
if (!u.lapic)
goto out;
r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
goto out;
r = 0;
break;
}
case KVM_SET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
if (IS_ERR(u.lapic))
return PTR_ERR(u.lapic);
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
break;
}
case KVM_INTERRUPT: {
struct kvm_interrupt irq;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
case KVM_NMI: {
r = kvm_vcpu_ioctl_nmi(vcpu);
break;
}
case KVM_SET_CPUID: {
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
break;
}
case KVM_SET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
break;
}
case KVM_GET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
goto out;
r = 0;
break;
}
case KVM_GET_MSRS:
r = msr_io(vcpu, argp, kvm_get_msr, 1);
break;
case KVM_SET_MSRS:
r = msr_io(vcpu, argp, do_set_msr, 0);
break;
case KVM_TPR_ACCESS_REPORTING: {
struct kvm_tpr_access_ctl tac;
r = -EFAULT;
if (copy_from_user(&tac, argp, sizeof tac))
goto out;
r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &tac, sizeof tac))
goto out;
r = 0;
break;
}
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
r = -EINVAL;
if (!irqchip_in_kernel(vcpu->kvm))
goto out;
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
r = 0;
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
u64 mcg_cap;
r = -EFAULT;
if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
goto out;
r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
break;
}
case KVM_X86_SET_MCE: {
struct kvm_x86_mce mce;
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
break;
}
case KVM_GET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
r = -EFAULT;
if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
break;
r = 0;
break;
}
case KVM_SET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
r = -EFAULT;
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
break;
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
break;
}
case KVM_GET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
r = -EFAULT;
if (copy_to_user(argp, &dbgregs,
sizeof(struct kvm_debugregs)))
break;
r = 0;
break;
}
case KVM_SET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
r = -EFAULT;
if (copy_from_user(&dbgregs, argp,
sizeof(struct kvm_debugregs)))
break;
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
case KVM_GET_XSAVE: {
u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
r = -ENOMEM;
if (!u.xsave)
break;
kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
r = -EFAULT;
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
break;
r = 0;
break;
}
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
if (IS_ERR(u.xsave))
return PTR_ERR(u.xsave);
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
}
case KVM_GET_XCRS: {
u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
r = -ENOMEM;
if (!u.xcrs)
break;
kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
r = -EFAULT;
if (copy_to_user(argp, u.xcrs,
sizeof(struct kvm_xcrs)))
break;
r = 0;
break;
}
case KVM_SET_XCRS: {
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
if (IS_ERR(u.xcrs))
return PTR_ERR(u.xcrs);
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
}
case KVM_SET_TSC_KHZ: {
u32 user_tsc_khz;
r = -EINVAL;
user_tsc_khz = (u32)arg;
if (user_tsc_khz >= kvm_max_guest_tsc_khz)
goto out;
if (user_tsc_khz == 0)
user_tsc_khz = tsc_khz;
kvm_set_tsc_khz(vcpu, user_tsc_khz);
r = 0;
goto out;
}
case KVM_GET_TSC_KHZ: {
r = vcpu->arch.virtual_tsc_khz;
goto out;
}
case KVM_KVMCLOCK_CTRL: {
r = kvm_set_guest_paused(vcpu);
goto out;
}
default:
r = -EINVAL;
}
out:
kfree(u.buffer);
return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
int ret;
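/* The TSS occupies three pages; they must fit below 4GB without wrapping. */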
if (addr > (unsigned int)(-3 * PAGE_SIZE))
return -EINVAL;
ret = kvm_x86_ops->set_tss_addr(kvm, addr);
return ret;
}
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
u64 ident_addr)
{
kvm->arch.ept_identity_map_addr = ident_addr;
return 0;
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
u32 kvm_nr_mmu_pages)
{
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
mutex_lock(&kvm->slots_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
mutex_unlock(&kvm->slots_lock);
return 0;
}
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
return kvm->arch.n_max_mmu_pages;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
memcpy(&chip->chip.pic,
&pic_irqchip(kvm)->pics[0],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_PIC_SLAVE:
memcpy(&chip->chip.pic,
&pic_irqchip(kvm)->pics[1],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_IOAPIC:
r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
break;
}
return r;
}
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[0],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_PIC_SLAVE:
spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[1],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_IOAPIC:
r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
break;
}
kvm_pic_update_irq(pic_irqchip(kvm));
return r;
}
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0, start = 0;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
if (!prev_legacy && cur_legacy)
start = 1;
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
struct kvm_reinject_control *control)
{
if (!kvm->arch.vpit)
return -ENXIO;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
/**
* kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
* @kvm: kvm instance
* @log: slot id and address to which we copy the log
*
* Keep in mind that VCPU threads can write to the bitmap concurrently.
* To avoid losing data, we enforce the following order for each bit:
*
* 1. Take a snapshot of the bit and clear it if needed.
* 2. Write-protect the corresponding page.
* 3. Flush TLBs if needed.
* 4. Copy the snapshot to userspace.
*
* Between 2 and 3, the guest may write to the page using the remaining TLB
* entry. This is not a problem because the page will be reported dirty at
* step 4 using the snapshot taken before and step 3 ensures that successive
* writes will be logged for the next call.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
int r;
struct kvm_memory_slot *memslot;
unsigned long n, i;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
bool is_dirty = false;
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
dirty_bitmap = memslot->dirty_bitmap;
r = -ENOENT;
if (!dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
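/*
* The dirty bitmap is allocated double-sized; use the second half
* as a snapshot buffer that can safely be copied to userspace
* after the live bits have been exchanged out under mmu_lock.
*/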
dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
memset(dirty_bitmap_buffer, 0, n);
spin_lock(&kvm->mmu_lock);
for (i = 0; i < n / sizeof(long); i++) {
unsigned long mask;
gfn_t offset;
if (!dirty_bitmap[i])
continue;
is_dirty = true;
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
offset = i * BITS_PER_LONG;
kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
}
if (is_dirty)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
goto out;
r = 0;
out:
mutex_unlock(&kvm->slots_lock);
return r;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event->irq, irq_event->level);
return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
/*
* This union makes it completely explicit to gcc-3.x
* that these two variables' stack usage should be
* combined, not added together.
*/
union {
struct kvm_pit_state ps;
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
r = -EFAULT;
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_CREATE_IRQCHIP: {
struct kvm_pic *vpic;
mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -EINVAL;
if (atomic_read(&kvm->online_vcpus))
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_master);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_slave);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_eclr);
mutex_unlock(&kvm->slots_lock);
kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
smp_wmb();
kvm->arch.vpic = vpic;
smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
mutex_unlock(&kvm->slots_lock);
}
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
case KVM_CREATE_PIT:
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
goto create_pit;
case KVM_CREATE_PIT2:
r = -EFAULT;
if (copy_from_user(&u.pit_config, argp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
mutex_unlock(&kvm->slots_lock);
break;
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
goto get_irqchip_out;
r = -EFAULT;
if (copy_to_user(argp, chip, sizeof *chip))
goto get_irqchip_out;
r = 0;
get_irqchip_out:
kfree(chip);
break;
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
goto set_irqchip_out;
r = 0;
set_irqchip_out:
kfree(chip);
break;
}
case KVM_GET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
break;
}
case KVM_GET_PIT2: {
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT2: {
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
break;
}
case KVM_REINJECT_CONTROL: {
struct kvm_reinject_control control;
r = -EFAULT;
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
sizeof(struct kvm_xen_hvm_config)))
goto out;
r = -EINVAL;
if (kvm->arch.xen_hvm_config.flags)
goto out;
r = 0;
break;
}
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
goto out;
r = -EINVAL;
if (user_ns.flags)
goto out;
r = 0;
local_irq_disable();
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
local_irq_disable();
now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
goto out;
r = 0;
break;
}
default:
;
}
out:
return r;
}
static void kvm_init_msr_list(void)
{
u32 dummy[2];
unsigned i, j;
/* Skip probing the first MSRs in the list; they are KVM-specific. */
for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
continue;
if (j < i)
msrs_to_save[j] = msrs_to_save[i];
j++;
}
num_msrs_to_save = j;
}
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
const void *v)
{
int handled = 0;
int n;
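/*
* Handle the access in chunks of at most 8 bytes, giving the
* in-kernel local APIC first refusal before trying the MMIO bus;
* stop at the first chunk nobody claims.
*/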
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->set_segment(vcpu, var, seg);
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->get_segment(vcpu, var, seg);
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
gpa_t t_gpa;
struct x86_exception exception;
BUG_ON(!mmu_is_nested(vcpu));
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
return t_gpa;
}
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
/* used to access any guest's mapped memory without checking CPL */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
}
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u32 access,
struct x86_exception *exception)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
if (ret < 0) {
r = X86EMUL_IO_NEEDED;
goto out;
}
bytes -= toread;
data += toread;
addr += toread;
}
out:
return r;
}
/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
access | PFERR_FETCH_MASK,
exception);
}
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}
int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val,
unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
PFERR_WRITE_MASK,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
if (ret < 0) {
r = X86EMUL_IO_NEEDED;
goto out;
}
bytes -= towrite;
data += towrite;
addr += towrite;
}
out:
return r;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
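/*
* Translate a guest virtual address for a memory access that may hit
* MMIO. Returns 1 if the access must be handled as MMIO, 0 if it hits
* ordinary guest memory, and -1 if the translation faults.
*/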
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
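/* Fast path: reuse the cached MMIO translation if the GVA and access rights match. */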
if (vcpu_match_mmio_gva(vcpu, gva)
&& !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
(gva & (PAGE_SIZE - 1));
trace_vcpu_match_mmio(gva, *gpa, write, false);
return 1;
}
*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
if (*gpa == UNMAPPED_GVA)
return -1;
/* For APIC access vmexit */
if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
return 1;
if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
trace_vcpu_match_mmio(gva, *gpa, write, true);
return 1;
}
return 0;
}
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes)
{
int ret;
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
if (ret < 0)
return 0;
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
return 1;
}
struct read_write_emulator_ops {
int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
int bytes);
int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes);
int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
int bytes, void *val);
int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes);
bool write;
};
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
vcpu->mmio_fragments[0].gpa, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
return 0;
}
static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
}
static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return emulator_write_phys(vcpu, gpa, val, bytes);
}
static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
return vcpu_mmio_write(vcpu, gpa, bytes, val);
}
static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
return X86EMUL_IO_NEEDED;
}
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
return X86EMUL_CONTINUE;
}
static const struct read_write_emulator_ops read_emultor = {
.read_write_prepare = read_prepare,
.read_write_emulate = read_emulate,
.read_write_mmio = vcpu_mmio_read,
.read_write_exit_mmio = read_exit_mmio,
};
static const struct read_write_emulator_ops write_emultor = {
.read_write_emulate = write_emulate,
.read_write_mmio = write_mmio,
.read_write_exit_mmio = write_exit_mmio,
.write = true,
};
static int emulator_read_write_onepage(unsigned long addr, void *val,
unsigned int bytes,
struct x86_exception *exception,
struct kvm_vcpu *vcpu,
const struct read_write_emulator_ops *ops)
{
gpa_t gpa;
int handled, ret;
bool write = ops->write;
struct kvm_mmio_fragment *frag;
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
/* For APIC access vmexit */
if (ret)
goto mmio;
if (ops->read_write_emulate(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
mmio:
/*
* Is this MMIO handled locally?
*/
handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
if (handled == bytes)
return X86EMUL_CONTINUE;
gpa += handled;
bytes -= handled;
val += handled;
WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
frag->gpa = gpa;
frag->data = val;
frag->len = bytes;
return X86EMUL_CONTINUE;
}
int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
int rc;
if (ops->read_write_prepare &&
ops->read_write_prepare(vcpu, val, bytes))
return X86EMUL_CONTINUE;
vcpu->mmio_nr_fragments = 0;
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
int now;
now = -addr & ~PAGE_MASK;
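/*
 * "-addr & ~PAGE_MASK" is the number of bytes up to the next page
 * boundary. Illustration (assumed values): addr = 0x1ffd gives
 * now = -0x1ffd & 0xfff = 3, so the straddling access is done as a
 * 3-byte piece followed by a page-aligned remainder.
 */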
rc = emulator_read_write_onepage(addr, val, now, exception,
vcpu, ops);
if (rc != X86EMUL_CONTINUE)
return rc;
addr += now;
val += now;
bytes -= now;
}
rc = emulator_read_write_onepage(addr, val, bytes, exception,
vcpu, ops);
if (rc != X86EMUL_CONTINUE)
return rc;
if (!vcpu->mmio_nr_fragments)
return rc;
gpa = vcpu->mmio_fragments[0].gpa;
vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
vcpu->run->exit_reason = KVM_EXIT_MMIO;
vcpu->run->mmio.phys_addr = gpa;
return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
void *val,
unsigned int bytes,
struct x86_exception *exception)
{
return emulator_read_write(ctxt, addr, val, bytes,
exception, &read_emultor);
}
int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
struct x86_exception *exception)
{
return emulator_read_write(ctxt, addr, (void *)val, bytes,
exception, &write_emultor);
}
#define CMPXCHG_TYPE(t, ptr, old, new) \
(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
#ifdef CONFIG_X86_64
# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
# define CMPXCHG64(ptr, old, new) \
(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif
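/*
 * Illustration (assumed arguments): CMPXCHG_TYPE(u16, kaddr, old, new)
 * expands to cmpxchg((u16 *)kaddr, *(u16 *)old, *(u16 *)new), and
 * cmpxchg() returns the previous memory value; comparing that against
 * *old means the macro evaluates to true only when the guest value was
 * actually swapped atomically.
 */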
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
struct page *page;
char *kaddr;
bool exchanged;
/* the guest's cmpxchg8b has to be emulated atomically */
if (bytes > 8 || (bytes & (bytes - 1)))
goto emul_write;
gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto emul_write;
if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
goto emul_write;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto emul_write;
kaddr = kmap_atomic(page);
kaddr += offset_in_page(gpa);
switch (bytes) {
case 1:
exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
break;
case 2:
exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
break;
case 4:
exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
break;
case 8:
exchanged = CMPXCHG64(kaddr, old, new);
break;
default:
BUG();
}
kunmap_atomic(kaddr);
kvm_release_page_dirty(page);
if (!exchanged)
return X86EMUL_CMPXCHG_FAILED;
kvm_mmu_pte_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
emul_write:
printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
/* TODO: String I/O for in-kernel devices */
int r;
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
}
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val,
unsigned int count, bool in)
{
trace_kvm_pio(!in, port, size, count);
vcpu->arch.pio.port = port;
vcpu->arch.pio.in = in;
vcpu->arch.pio.count = count;
vcpu->arch.pio.size = size;
if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
vcpu->arch.pio.count = 0;
return 1;
}
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = size;
vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
vcpu->run->io.count = count;
vcpu->run->io.port = port;
return 0;
}
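/*
 * Note, inferred from the code above: a non-zero return from
 * emulator_pio_in_out() means an in-kernel device claimed the port
 * (for reads, pio_data already holds the result); a zero return means
 * the KVM_EXIT_IO state has been staged and the vcpu must exit to
 * userspace, which transfers the data via the shared page at
 * KVM_PIO_PAGE_OFFSET.
 */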
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port, void *val,
unsigned int count)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int ret;
if (vcpu->arch.pio.count)
goto data_avail;
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
memcpy(val, vcpu->arch.pio_data, size * count);
vcpu->arch.pio.count = 0;
return 1;
}
return 0;
}
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port,
const void *val, unsigned int count)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
memcpy(vcpu->arch.pio_data, val, size * count);
return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
return kvm_x86_ops->get_segment_base(vcpu, seg);
}
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
if (kvm_x86_ops->has_wbinvd_exit()) {
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
wbinvd_ipi, NULL, 1);
put_cpu();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
} else
wbinvd();
return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
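/*
 * Worked example (illustrative values): mk_cr_64(0xffffffff00000000,
 * 0x80050033) keeps the upper 32 bits of the current control register
 * and replaces only the low 32 bits, yielding 0xffffffff80050033,
 * matching the 32-bit "mov crN" semantics the emulator needs.
 */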
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long value;
switch (cr) {
case 0:
value = kvm_read_cr0(vcpu);
break;
case 2:
value = vcpu->arch.cr2;
break;
case 3:
value = kvm_read_cr3(vcpu);
break;
case 4:
value = kvm_read_cr4(vcpu);
break;
case 8:
value = kvm_get_cr8(vcpu);
break;
default:
kvm_err("%s: unexpected cr %u\n", __func__, cr);
return 0;
}
return value;
}
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int res = 0;
switch (cr) {
case 0:
res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
break;
case 2:
vcpu->arch.cr2 = val;
break;
case 3:
res = kvm_set_cr3(vcpu, val);
break;
case 4:
res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
res = kvm_set_cr8(vcpu, val);
break;
default:
kvm_err("%s: unexpected cr %u\n", __func__, cr);
res = -1;
}
return res;
}
static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
{
kvm_set_rflags(emul_to_vcpu(ctxt), val);
}
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}
static unsigned long emulator_get_cached_segment_base(
struct x86_emulate_ctxt *ctxt, int seg)
{
return get_segment_base(emul_to_vcpu(ctxt), seg);
}
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
struct desc_struct *desc, u32 *base3,
int seg)
{
struct kvm_segment var;
kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
*selector = var.selector;
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
return false;
}
if (var.g)
var.limit >>= 12;
set_desc_limit(desc, var.limit);
set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
if (base3)
*base3 = var.base >> 32;
#endif
desc->type = var.type;
desc->s = var.s;
desc->dpl = var.dpl;
desc->p = var.present;
desc->avl = var.avl;
desc->l = var.l;
desc->d = var.db;
desc->g = var.g;
return true;
}
static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
struct desc_struct *desc, u32 base3,
int seg)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
struct kvm_segment var;
var.selector = selector;
var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
var.base |= ((u64)base3) << 32;
#endif
var.limit = get_desc_limit(desc);
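/*
 * With G=1 the descriptor limit is in 4 KiB units; e.g. (assumed
 * value) a raw limit of 0xfffff scales to (0xfffff << 12) | 0xfff =
 * 0xffffffff, the 4 GiB flat-segment limit.
 */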
if (desc->g)
var.limit = (var.limit << 12) | 0xfff;
var.type = desc->type;
var.dpl = desc->dpl;
var.db = desc->d;
var.s = desc->s;
var.l = desc->l;
var.g = desc->g;
var.avl = desc->avl;
var.present = desc->p;
var.unusable = !var.present;
var.padding = 0;
kvm_set_segment(vcpu, &var, seg);
}
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
}
static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 data)
{
struct msr_data msr;
msr.data = data;
msr.index = msr_index;
msr.host_initiated = false;
return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc, u64 *pdata)
{
return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
}
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
emul_to_vcpu(ctxt)->arch.halt_request = 1;
}
static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_disable();
kvm_load_guest_fpu(emul_to_vcpu(ctxt));
/*
* CR0.TS may reference the host fpu state, not the guest fpu state,
* so it may be clear at this point.
*/
clts();
}
static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_enable();
}
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}
static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
return kvm_register_read(emul_to_vcpu(ctxt), reg);
}
static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
.read_std = kvm_read_guest_virt_system,
.write_std = kvm_write_guest_virt_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
.invlpg = emulator_invlpg,
.pio_in_emulated = emulator_pio_in_emulated,
.pio_out_emulated = emulator_pio_out_emulated,
.get_segment = emulator_get_segment,
.set_segment = emulator_set_segment,
.get_cached_segment_base = emulator_get_cached_segment_base,
.get_gdt = emulator_get_gdt,
.get_idt = emulator_get_idt,
.set_gdt = emulator_set_gdt,
.set_idt = emulator_set_idt,
.get_cr = emulator_get_cr,
.set_cr = emulator_set_cr,
.set_rflags = emulator_set_rflags,
.cpl = emulator_get_cpl,
.get_dr = emulator_get_dr,
.set_dr = emulator_set_dr,
.set_msr = emulator_set_msr,
.get_msr = emulator_get_msr,
.read_pmc = emulator_read_pmc,
.halt = emulator_halt,
.wbinvd = emulator_wbinvd,
.fix_hypercall = emulator_fix_hypercall,
.get_fpu = emulator_get_fpu,
.put_fpu = emulator_put_fpu,
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
/*
 * An "sti; sti" sequence only disables interrupts for the first
 * instruction. So, if the last instruction, be it emulated or
 * not, left the system with the INT_STI flag enabled, it
 * means that the last instruction is an sti. We should not
 * leave the flag on in this case. The same goes for mov ss.
 */
if (!(int_shadow & mask))
kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
}
static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
kvm_propagate_fault(vcpu, &ctxt->exception);
else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
else
kvm_queue_exception(vcpu, ctxt->exception.vector);
}
static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
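/*
 * Note, inferred from the memset below: everything from ->twobyte up
 * to, but not including, ->_regs is treated as one contiguous run of
 * per-instruction decode state, so clearing it this way relies on the
 * field ordering inside struct x86_emulate_ctxt staying intact.
 */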
memset(&ctxt->twobyte, 0,
(void *)&ctxt->_regs - (void *)&ctxt->twobyte);
ctxt->fetch.start = 0;
ctxt->fetch.end = 0;
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.pos = 0;
ctxt->mem_read.end = 0;
}
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu);
ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
cs_l ? X86EMUL_MODE_PROT64 :
cs_db ? X86EMUL_MODE_PROT32 :
X86EMUL_MODE_PROT16;
ctxt->guest_mode = is_guest_mode(vcpu);
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
ctxt->op_bytes = 2;
ctxt->ad_bytes = 2;
ctxt->_eip = ctxt->eip + inc_eip;
ret = emulate_int_real(ctxt, irq);
if (ret != X86EMUL_CONTINUE)
return EMULATE_FAIL;
ctxt->eip = ctxt->_eip;
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
if (irq == NMI_VECTOR)
vcpu->arch.nmi_pending = 0;
else
vcpu->arch.interrupt.pending = false;
return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
}
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
bool write_fault_to_shadow_pgtable)
{
gpa_t gpa = cr2;
pfn_t pfn;
if (!vcpu->arch.mmu.direct_map) {
/*
 * Write permission should be allowed since only
 * write access needs to be emulated.
 */
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
/*
 * If the mapping is invalid in the guest, let the CPU retry
 * it to generate the fault.
 */
if (gpa == UNMAPPED_GVA)
return true;
}
/*
 * Do not retry the unhandleable instruction if it faults on
 * readonly host memory; otherwise it will go into an infinite loop:
 * retry instruction -> write #PF -> emulation fail -> retry
 * instruction -> ...
 */
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
/*
 * If the instruction failed on the error pfn, it cannot be fixed;
 * report the error to userspace.
 */
if (is_error_noslot_pfn(pfn))
return false;
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
if (vcpu->arch.mmu.direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
spin_unlock(&vcpu->kvm->mmu_lock);
if (indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
/*
 * If emulation was due to access to a shadowed page table
 * and it failed, try to unshadow the page and re-enter the
 * guest to let the CPU execute the instruction.
 */
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
/*
 * If the access faults on its page table, it cannot
 * be fixed by unprotecting the shadow page, and it should
 * be reported to userspace.
 */
return !write_fault_to_shadow_pgtable;
}
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
unsigned long cr2, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
/*
 * If the emulation is caused by #PF and it is a non-page-table-
 * writing instruction, it means the VM exit was caused by shadow
 * page protection; we can zap the shadow page and retry this
 * instruction directly.
 *
 * Note: if the guest uses a non-page-table modifying instruction
 * on the PDE that points to the instruction, then we will unmap
 * the instruction and go into an infinite loop. So, we cache the
 * last retried eip and the last fault address; if we meet the eip
 * and the address again, we can break out of the potential infinite
 * loop.
 */
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_RETRY))
return false;
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2,
write_fault_to_spt))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for the vmware backdoor interface to work since it
changes register values during the IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
EXPORT_SYMBOL_GPL(x86_emulate_instruction);
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
/* do not return to emulator after return from userspace */
vcpu->arch.pio.count = 0;
return ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
static void tsc_bad(void *info)
{
__this_cpu_write(cpu_tsc_khz, 0);
}
static void tsc_khz_changed(void *data)
{
struct cpufreq_freqs *freq = data;
unsigned long khz = 0;
if (data)
khz = freq->new;
else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
khz = cpufreq_quick_get(raw_smp_processor_id());
if (!khz)
khz = tsc_khz;
__this_cpu_write(cpu_tsc_khz, khz);
}
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i, send_ipi = 0;
/*
* We allow guests to temporarily run on slowing clocks,
* provided we notify them after, or to run on accelerating
* clocks, provided we notify them before. Thus time never
* goes backwards.
*
* However, we have a problem. We can't atomically update
* the frequency of a given CPU from this function; it is
* merely a notifier, which can be called from any CPU.
* Changing the TSC frequency at arbitrary points in time
* requires a recomputation of local variables related to
* the TSC for each VCPU. We must flag these local variables
* to be updated and be sure the update takes place with the
* new frequency before any guests proceed.
*
* Unfortunately, the combination of hotplug CPU and frequency
* change creates an intractable locking scenario; the order
* of when these callouts happen is undefined with respect to
* CPU hotplug, and they can race with each other. As such,
* merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
* undefined; you can actually have a CPU frequency change take
* place in between the computation of X and the setting of the
* variable. To protect against this problem, all updates of
* the per_cpu tsc_khz variable are done in an interrupt
* protected IPI, and all callers wishing to update the value
* must wait for a synchronous IPI to complete (which is trivial
* if the caller is on the CPU already). This establishes the
* necessary total order on variable updates.
*
* Note that because a guest time update may take place
* anytime after the setting of the VCPU's request bit, the
* correct TSC value must be set before the request. However,
* to ensure the update actually makes it to any guest which
* starts running in hardware virtualization between the set
* and the acquisition of the spinlock, we must also ping the
* CPU after setting the request bit.
*
*/
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
return 0;
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
return 0;
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != smp_processor_id())
send_ipi = 1;
}
}
raw_spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
* We upscale the frequency. We must make sure the guest
* doesn't see old kvmclock values while running with
* the new frequency; otherwise we risk the guest seeing
* time go backwards.
*
* In case we update the frequency for another cpu
* (which might be in guest context) send an interrupt
* to kick the cpu out of guest context. Next time
* guest context is entered kvmclock will be updated,
* so the guest will not see stale values.
*/
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
}
return 0;
}
static struct notifier_block kvmclock_cpufreq_notifier_block = {
.notifier_call = kvmclock_cpufreq_notifier
};
static int kvmclock_cpu_notifier(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, tsc_bad, NULL, 1);
break;
}
return NOTIFY_OK;
}
static struct notifier_block kvmclock_cpu_notifier_block = {
.notifier_call = kvmclock_cpu_notifier,
.priority = -INT_MAX
};
static void kvm_timer_init(void)
{
int cpu;
max_tsc_khz = tsc_khz;
register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
cpufreq_get_policy(&policy, cpu);
if (policy.cpuinfo.max_freq)
max_tsc_khz = policy.cpuinfo.max_freq;
put_cpu();
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
}
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
int kvm_is_in_guest(void)
{
return __this_cpu_read(current_vcpu) != NULL;
}
static int kvm_is_user_mode(void)
{
int user_mode = 3;
if (__this_cpu_read(current_vcpu))
user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
return user_mode != 0;
}
static unsigned long kvm_get_guest_ip(void)
{
unsigned long ip = 0;
if (__this_cpu_read(current_vcpu))
ip = kvm_rip_read(__this_cpu_read(current_vcpu));
return ip;
}
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.is_in_guest = kvm_is_in_guest,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
};
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
int maxphyaddr = boot_cpu_data.x86_phys_bits;
/*
* Set the reserved bits and the present bit of a paging-structure
* entry to generate a page fault with PFERR.RSV = 1.
*/
mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
mask |= 1ull;
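/*
 * Worked example (assumed maxphyaddr = 36): the expression above sets
 * bits 36..62, i.e. ((1ull << 27) - 1) << 36, and bit 0 (present), so
 * a guest access through such an spte faults with the reserved-bit
 * error code set.
 */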
#ifdef CONFIG_X86_64
/*
* If the reserved bit is not supported, clear the present bit to
* disable MMIO page faults.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
#endif
kvm_mmu_set_mmio_spte_mask(mask);
}
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
atomic_set(&kvm_guest_has_master_clock, 0);
raw_spin_unlock(&kvm_lock);
}
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
/*
* Notification about pvclock gtod data update.
*/
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
void *priv)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
struct timekeeper *tk = priv;
update_pvclock_gtod(tk);
/* disable master clock if host does not trust, or does not
* use, TSC clocksource
*/
if (gtod->clock.vclock_mode != VCLOCK_TSC &&
atomic_read(&kvm_guest_has_master_clock) != 0)
queue_work(system_long_wq, &pvclock_gtod_work);
return 0;
}
static struct notifier_block pvclock_gtod_notifier = {
.notifier_call = pvclock_gtod_notify,
};
#endif
int kvm_arch_init(void *opaque)
{
int r;
struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
r = -EEXIST;
goto out;
}
if (!ops->cpu_has_kvm_support()) {
printk(KERN_ERR "kvm: no hardware support\n");
r = -EOPNOTSUPP;
goto out;
}
if (ops->disabled_by_bios()) {
printk(KERN_ERR "kvm: disabled by bios\n");
r = -EOPNOTSUPP;
goto out;
}
r = -ENOMEM;
shared_msrs = alloc_percpu(struct kvm_shared_msrs);
if (!shared_msrs) {
printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
goto out;
}
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
kvm_set_mmio_spte_mask();
kvm_init_msr_list();
kvm_x86_ops = ops;
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
kvm_timer_init();
perf_register_guest_info_callbacks(&kvm_guest_cbs);
if (cpu_has_xsave)
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_lapic_init();
#ifdef CONFIG_X86_64
pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
#endif
return 0;
out_free_percpu:
free_percpu(shared_msrs);
out:
return r;
}
void kvm_arch_exit(void)
{
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
free_percpu(shared_msrs);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
return 1;
} else {
vcpu->run->exit_reason = KVM_EXIT_HLT;
return 0;
}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
u64 param, ingpa, outgpa, ret;
uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
bool fast, longmode;
int cs_db, cs_l;
/*
* A hypercall generates #UD when issued from non-zero CPL or from
* real mode, per the Hyper-V spec.
*/
if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
longmode = is_long_mode(vcpu) && cs_l == 1;
if (!longmode) {
param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
}
#ifdef CONFIG_X86_64
else {
param = kvm_register_read(vcpu, VCPU_REGS_RCX);
ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
}
#endif
code = param & 0xffff;
fast = (param >> 16) & 0x1;
rep_cnt = (param >> 32) & 0xfff;
rep_idx = (param >> 48) & 0xfff;
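/*
 * Hyper-V hypercall input layout as decoded above: bits 0-15 = call
 * code, bit 16 = fast-call flag, bits 32-43 = rep count, bits 48-59 =
 * rep start index. Illustration (assumed value): param =
 * 0x0005000300010002 decodes to code 0x2, fast, rep_cnt 3, rep_idx 5.
 */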
trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
switch (code) {
case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
kvm_vcpu_on_spin(vcpu);
break;
default:
res = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
}
ret = res | (((u64)rep_done & 0xfff) << 32);
if (longmode) {
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
} else {
kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
}
return 1;
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
trace_kvm_hypercall(nr, a0, a1, a2, a3);
if (!is_long_mode(vcpu)) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
a1 &= 0xFFFFFFFF;
a2 &= 0xFFFFFFFF;
a3 &= 0xFFFFFFFF;
}
if (kvm_x86_ops->get_cpl(vcpu) != 0) {
ret = -KVM_EPERM;
goto out;
}
switch (nr) {
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
default:
ret = -KVM_ENOSYS;
break;
}
out:
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++vcpu->stat.hypercalls;
return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
char instruction[3];
unsigned long rip = kvm_rip_read(vcpu);
/*
* Blow out the MMU so that no other VCPU keeps an active mapping,
* ensuring that the updated hypercall appears atomically across all
* VCPUs.
*/
kvm_mmu_zap_all(vcpu->kvm);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
/*
* Check whether userspace requested an interrupt window and whether
* the interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
vcpu->run->request_interrupt_window &&
kvm_arch_interrupt_allowed(vcpu));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
else
kvm_run->ready_for_interrupt_injection =
kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu);
}
static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
if (!kvm_x86_ops->update_cr8_intercept)
return;
if (!vcpu->arch.apic)
return;
if (!vcpu->arch.apic->vapic_addr)
max_irr = kvm_lapic_find_highest_irr(vcpu);
else
max_irr = -1;
if (max_irr != -1)
max_irr >>= 4;
tpr = kvm_lapic_get_cr8(vcpu);
kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
static void inject_pending_event(struct kvm_vcpu *vcpu)
{
/* try to reinject previous events if any */
if (vcpu->arch.exception.pending) {
trace_kvm_inj_exception(vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code);
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
vcpu->arch.exception.reinject);
return;
}
if (vcpu->arch.nmi_injected) {
kvm_x86_ops->set_nmi(vcpu);
return;
}
if (vcpu->arch.interrupt.pending) {
kvm_x86_ops->set_irq(vcpu);
return;
}
/* try to inject new event if pending */
if (vcpu->arch.nmi_pending) {
if (kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
kvm_x86_ops->set_irq(vcpu);
}
}
}
static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
!vcpu->guest_xcr0_loaded) {
/* kvm_set_xcr() also depends on this */
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
vcpu->guest_xcr0_loaded = 1;
}
}
static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_xcr0_loaded) {
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
vcpu->guest_xcr0_loaded = 0;
}
}
static void process_nmi(struct kvm_vcpu *vcpu)
{
unsigned limit = 2;
/*
* x86 is limited to one NMI running, and one NMI pending after it.
* If an NMI is already in progress, limit further NMIs to just one.
* Otherwise, allow two (and we'll inject the first one immediately).
*/
if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
limit = 1;
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
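/*
 * Example of the clamping above (assumed state): with one NMI already
 * being handled (nmi_injected set) and three NMIs queued, limit = 1,
 * so exactly one extra NMI stays pending and the rest are dropped,
 * mirroring how real hardware collapses back-to-back NMIs.
 */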
static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
int i;
struct kvm_vcpu *vcpu;
struct kvm_arch *ka = &kvm->arch;
spin_lock(&ka->pvclock_gtod_sync_lock);
kvm_make_mclock_inprogress_request(kvm);
/* no guest entries from this point */
pvclock_update_vm_gtod_copy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
/* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm)
clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
{
u64 eoi_exit_bitmap[4];
memset(eoi_exit_bitmap, 0, sizeof(eoi_exit_bitmap));
kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = false;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
kvm_gen_update_masterclock(vcpu->kvm);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
goto out;
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_x86_ops->tlb_flush(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
vcpu->fpu_active = 0;
kvm_x86_ops->fpu_deactivate(vcpu);
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
vcpu->arch.apf.halted = true;
r = 1;
goto out;
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
req_immediate_exit =
kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
update_eoi_exitmap(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
/* enable NMI/IRQ window open exits if needed */
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
/*
* Update architecture specific hints for APIC
* virtual interrupt delivery.
*/
if (kvm_x86_ops->hwapic_irr_update)
kvm_x86_ops->hwapic_irr_update(vcpu,
kvm_lapic_find_highest_irr(vcpu));
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
}
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto cancel_injection;
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
vcpu->mode = IN_GUEST_MODE;
/* We should set ->mode before checking ->requests;
* see the comment in make_all_cpus_request().
*/
smp_mb();
local_irq_disable();
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
r = 1;
goto cancel_injection;
}
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
}
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu);
/*
* If the guest has used debug registers, at least dr7
* will be disabled while returning to the host.
* If we don't have active breakpoints in the host, we don't
* care about the messed up debug address registers. But if
* we have some of them active, restore the old state.
*/
if (hw_breakpoint_active())
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
native_read_tsc());
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
++vcpu->stat.exits;
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
if (unlikely(vcpu->arch.tsc_always_catchup))
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
return r;
cancel_injection:
kvm_x86_ops->cancel_injection(vcpu);
if (unlikely(vcpu->arch.apic_attention))
kvm_lapic_sync_from_vapic(vcpu);
out:
return r;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->arch.sipi_vector);
kvm_lapic_reset(vcpu);
r = kvm_vcpu_reset(vcpu);
if (r)
return r;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
r = vapic_enter(vcpu);
if (r) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
return r;
}
r = 1;
while (r > 0) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
else {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
switch (vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
/* fall through */
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
case KVM_MP_STATE_SIPI_RECEIVED:
default:
r = -EINTR;
break;
}
}
}
if (r <= 0)
break;
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
if (dm_request_for_irq_injection(vcpu)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
}
kvm_check_async_pf_completion(vcpu);
if (signal_pending(current)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vapic_exit(vcpu);
return r;
}
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r != EMULATE_DONE)
return 0;
return 1;
}
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
BUG_ON(!vcpu->arch.pio.count);
return complete_emulated_io(vcpu);
}
/*
* Implements the following, as a state machine:
*
* read:
* for each fragment
* for each mmio piece in the fragment
* write gpa, len
* exit
* copy data
* execute insn
*
* write:
* for each fragment
* for each mmio piece in the fragment
* write gpa, len
* copy data
* exit
*/
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
memcpy(frag->data, run->mmio.data, len);
if (frag->len <= 8) {
/* Switch to the next fragment. */
frag++;
vcpu->mmio_cur_fragment++;
} else {
/* Go forward to the next mmio piece. */
frag->data += len;
frag->gpa += len;
frag->len -= len;
}
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
if (vcpu->mmio_is_write)
return 1;
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
}
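/*
 * Illustrative walk-through (assumed access): a 16-byte emulated read
 * held in one fragment completes in two userspace round trips. The
 * first exit reports an 8-byte mmio piece; this handler then advances
 * frag->data, frag->gpa and frag->len by 8 and exits again. On the
 * second pass frag->len <= 8, so it switches to the next fragment,
 * and once all fragments are done it sets mmio_read_completed and
 * re-runs the instruction via complete_emulated_io().
 */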
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
sigset_t sigsaved;
if (!tsk_used_math(current) && init_fpu(current))
return -ENOMEM;
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
r = -EAGAIN;
goto out;
}
/* re-sync apic's tpr */
if (!irqchip_in_kernel(vcpu->kvm)) {
if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
r = -EINVAL;
goto out;
}
}
if (unlikely(vcpu->arch.complete_userspace_io)) {
int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
vcpu->arch.complete_userspace_io = NULL;
r = cui(vcpu);
if (r <= 0)
goto out;
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
r = __vcpu_run(vcpu);
out:
post_kvm_run_save(vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
/*
* We are here if userspace calls get_regs() in the middle of
* instruction emulation. Register state needs to be copied
* back from the emulation context to the vcpu. Userspace usually
* shouldn't do that, but some badly designed PV devices (the
* vmware backdoor interface) need this to work.
*/
emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif
regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_get_rflags(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif
kvm_rip_write(vcpu, regs->rip);
kvm_set_rflags(vcpu, regs->rflags);
vcpu->arch.exception.pending = false;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;
kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
*db = cs.db;
*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct desc_ptr dt;
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_x86_ops->get_idt(vcpu, &dt);
sregs->idt.limit = dt.size;
sregs->idt.base = dt.address;
kvm_x86_ops->get_gdt(vcpu, &dt);
sregs->gdt.limit = dt.size;
sregs->gdt.base = dt.address;
sregs->cr0 = kvm_read_cr0(vcpu);
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = kvm_read_cr3(vcpu);
sregs->cr4 = kvm_read_cr4(vcpu);
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
mp_state->mp_state = vcpu->arch.mp_state;
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
vcpu->arch.mp_state = mp_state->mp_state;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (ret)
return EMULATE_FAIL;
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
int pending_vec, max_bits, idx;
struct desc_ptr dt;
if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
return -EINVAL;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
dt.size = sregs->gdt.limit;
dt.address = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
max_bits = KVM_NR_INTERRUPTS;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
update_cr8_intercept(vcpu);
/* Older userspace won't unhalt the vcpu on reset. */
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
unsigned long rflags;
int i, r;
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
if (vcpu->arch.exception.pending)
goto out;
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
kvm_queue_exception(vcpu, DB_VECTOR);
else
kvm_queue_exception(vcpu, BP_VECTOR);
}
/*
* Read rflags while potentially injected trace flags are still
* filtered out.
*/
rflags = kvm_get_rflags(vcpu);
vcpu->guest_debug = dbg->control;
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
vcpu->guest_debug = 0;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
for (i = 0; i < KVM_NR_DB_REGS; ++i)
vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
} else {
for (i = 0; i < KVM_NR_DB_REGS; i++)
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
}
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
get_segment_base(vcpu, VCPU_SREG_CS);
/*
* Trigger an rflags update that will inject or remove the trace
* flags.
*/
kvm_set_rflags(vcpu, rflags);
kvm_x86_ops->update_db_bp_intercept(vcpu);
r = 0;
out:
return r;
}
/*
* Translate a guest virtual address to a guest physical address.
*/
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
unsigned long vaddr = tr->linear_address;
gpa_t gpa;
int idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
tr->usermode = 0;
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct i387_fxsave_struct *fxsave =
&vcpu->arch.guest_fpu.state->fxsave;
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
fpu->fsw = fxsave->swd;
fpu->ftwx = fxsave->twd;
fpu->last_opcode = fxsave->fop;
fpu->last_ip = fxsave->rip;
fpu->last_dp = fxsave->rdp;
memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct i387_fxsave_struct *fxsave =
&vcpu->arch.guest_fpu.state->fxsave;
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
fxsave->swd = fpu->fsw;
fxsave->twd = fpu->ftwx;
fxsave->fop = fpu->last_opcode;
fxsave->rip = fpu->last_ip;
fxsave->rdp = fpu->last_dp;
memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
return 0;
}
int fx_init(struct kvm_vcpu *vcpu)
{
int err;
err = fpu_alloc(&vcpu->arch.guest_fpu);
if (err)
return err;
fpu_finit(&vcpu->arch.guest_fpu);
/*
* Ensure guest xcr0 is valid for loading
*/
vcpu->arch.xcr0 = XSTATE_FP;
vcpu->arch.cr0 |= X86_CR0_ET;
return 0;
}
EXPORT_SYMBOL_GPL(fx_init);
static void fx_free(struct kvm_vcpu *vcpu)
{
fpu_free(&vcpu->arch.guest_fpu);
}
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_fpu_loaded)
return;
/*
	 * Restore all possible FPU states in the guest and assume the
	 * host may use all available bits.
	 * The guest xcr0 will be loaded later.
*/
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
fpu_restore_checking(&vcpu->arch.guest_fpu);
trace_kvm_fpu(1);
}
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
kvm_put_guest_xcr0(vcpu);
if (!vcpu->guest_fpu_loaded)
return;
vcpu->guest_fpu_loaded = 0;
fpu_save_init(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
trace_kvm_fpu(0);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
kvmclock_reset(vcpu);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
printk_once(KERN_WARNING
"kvm: SMP vm created on host with unstable TSC; "
"guest TSC will not be reliable\n");
return kvm_x86_ops->vcpu_create(kvm, id);
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.mtrr_state.have_fixed = 1;
r = vcpu_load(vcpu);
if (r)
return r;
r = kvm_vcpu_reset(vcpu);
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return r;
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
int r;
struct msr_data msr;
r = vcpu_load(vcpu);
if (r)
return r;
msr.data = 0x0;
msr.index = MSR_IA32_TSC;
msr.host_initiated = true;
kvm_write_tsc(vcpu, &msr);
vcpu_put(vcpu);
return r;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.apf.msr_val = 0;
r = vcpu_load(vcpu);
BUG_ON(r);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
}
static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
kvm_pmu_reset(vcpu);
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
return kvm_x86_ops->vcpu_reset(vcpu);
}
int kvm_arch_hardware_enable(void *garbage)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
int ret;
u64 local_tsc;
u64 max_tsc = 0;
bool stable, backwards_tsc = false;
kvm_shared_msr_cpu_online();
ret = kvm_x86_ops->hardware_enable(garbage);
if (ret != 0)
return ret;
local_tsc = native_read_tsc();
stable = !check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!stable && vcpu->cpu == smp_processor_id())
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
if (stable && vcpu->arch.last_host_tsc > local_tsc) {
backwards_tsc = true;
if (vcpu->arch.last_host_tsc > max_tsc)
max_tsc = vcpu->arch.last_host_tsc;
}
}
}
/*
* Sometimes, even reliable TSCs go backwards. This happens on
* platforms that reset TSC during suspend or hibernate actions, but
* maintain synchronization. We must compensate. Fortunately, we can
* detect that condition here, which happens early in CPU bringup,
* before any KVM threads can be running. Unfortunately, we can't
* bring the TSCs fully up to date with real time, as we aren't yet far
* enough into CPU bringup that we know how much real time has actually
* elapsed; our helper function, get_kernel_ns() will be using boot
* variables that haven't been updated yet.
*
* So we simply find the maximum observed TSC above, then record the
* adjustment to TSC in each VCPU. When the VCPU later gets loaded,
* the adjustment will be applied. Note that we accumulate
* adjustments, in case multiple suspend cycles happen before some VCPU
* gets a chance to run again. In the event that no KVM threads get a
* chance to run, we will miss the entire elapsed period, as we'll have
* reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
	 * lose cycle time. This isn't too big a deal, since the loss will be
* uniform across all VCPUs (not to mention the scenario is extremely
* unlikely). It is possible that a second hibernate recovery happens
* much faster than a first, causing the observed TSC here to be
* smaller; this would require additional padding adjustment, which is
* why we set last_host_tsc to the local tsc observed here.
*
* N.B. - this code below runs only on platforms with reliable TSC,
* as that is the only way backwards_tsc is set above. Also note
* that this runs for ALL vcpus, which is not a bug; all VCPUs should
* have the same delta_cyc adjustment applied if backwards_tsc
* is detected. Note further, this adjustment is only done once,
* as we reset last_host_tsc on all VCPUs to stop this from being
* called multiple times (one for each physical CPU bringup).
*
* Platforms with unreliable TSCs don't have to deal with this, they
* will be compensated by the logic in vcpu_load, which sets the TSC to
* catchup mode. This will catchup all VCPUs to real time, but cannot
* guarantee that they stay in perfect synchronization.
*/
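	/*
	 * Worked example of the compensation below: if two VCPUs last ran
	 * at host TSC values 900 and 1000 and the TSC was reset across
	 * suspend so that local_tsc now reads 100, then max_tsc = 1000 and
	 * delta_cyc = 900 is accumulated into every VCPU's
	 * tsc_offset_adjustment, so no guest observes its TSC moving
	 * backwards.
	 */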
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = local_tsc;
set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
&vcpu->requests);
}
/*
			 * We have to disable TSC offset matching: if a VM was
			 * being booted while the host was entering S4 suspend,
			 * the stale matching state could cause problems.
			 * Solving this properly is left as an exercise to the
			 * reader.
*/
kvm->arch.last_tsc_nsec = 0;
kvm->arch.last_tsc_write = 0;
}
}
return 0;
}
void kvm_arch_hardware_disable(void *garbage)
{
kvm_x86_ops->hardware_disable(garbage);
drop_user_return_notifiers(garbage);
}
int kvm_arch_hardware_setup(void)
{
return kvm_x86_ops->hardware_setup();
}
void kvm_arch_hardware_unsetup(void)
{
kvm_x86_ops->hardware_unsetup();
}
void kvm_arch_check_processor_compat(void *rtn)
{
kvm_x86_ops->check_processor_compatibility(rtn);
}
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
}
struct static_key kvm_no_apic_vcpu __read_mostly;
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
struct kvm *kvm;
int r;
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
if (irqchip_in_kernel(kvm)) {
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
r = fx_init(vcpu);
if (r)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
fail_free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
kvm_free_lapic(vcpu);
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail:
return r;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
int idx;
kvm_pmu_destroy(vcpu);
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_mmu_destroy(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
if (!irqchip_in_kernel(vcpu->kvm))
static_key_slow_dec(&kvm_no_apic_vcpu);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
if (type)
return -EINVAL;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
&kvm->arch.irq_sources_bitmap);
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
mutex_init(&kvm->arch.apic_map_lock);
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
pvclock_update_vm_gtod_copy(kvm);
return 0;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
int r;
r = vcpu_load(vcpu);
BUG_ON(r);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
/*
* Unpin any mmu pages first.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_unload_vcpu_mmu(vcpu);
}
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_free(vcpu);
mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;
atomic_set(&kvm->online_vcpus, 0);
mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
kvm_free_all_assigned_devices(kvm);
kvm_free_pit(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_iommu_unmap_guest(kvm);
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
if (kvm->arch.apic_access_page)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
kvm_kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
continue;
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
kvm_kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
unsigned long ugfn;
int lpages;
int level = i + 1;
lpages = gfn_to_index(slot->base_gfn + npages - 1,
slot->base_gfn, level) + 1;
slot->arch.rmap[i] =
kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
sizeof(*slot->arch.lpage_info[i - 1]));
if (!slot->arch.lpage_info[i - 1])
goto out_free;
if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
slot->arch.lpage_info[i - 1][0].write_count = 1;
if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
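		/*
		 * Example of the boundary marking above, assuming 4K base
		 * pages: a slot with base_gfn = 0x201 is not aligned to a
		 * level-2 (512-page) region, so its first partial large-page
		 * slice gets write_count = 1 and is never mapped huge.
		 */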
ugfn = slot->userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!kvm_largepages_enabled()) {
unsigned long j;
for (j = 0; j < lpages; ++j)
slot->arch.lpage_info[i - 1][j].write_count = 1;
}
}
return 0;
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvm_kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;
kvm_kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
bool user_alloc)
{
int npages = memslot->npages;
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
memslot->userspace_addr = userspace_addr;
}
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
bool user_alloc)
{
int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
int ret;
ret = vm_munmap(old.userspace_addr,
old.npages * PAGE_SIZE);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
"failed to munmap memory\n");
}
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
if (nr_mmu_pages)
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
/*
* Write protect all pages for dirty logging.
* Existing largepage mappings are destroyed here and new ones will
* not be created until the end of the logging.
*/
if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
/*
* If memory slot is created, or moved, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm);
}
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
kvm_arch_flush_shadow_all(kvm);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
|| atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops->interrupt_allowed(vcpu);
}
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
unsigned long current_rip = kvm_rip_read(vcpu) +
get_segment_base(vcpu, VCPU_SREG_CS);
return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
rflags = kvm_x86_ops->get_rflags(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
rflags &= ~X86_EFLAGS_TF;
return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
rflags |= X86_EFLAGS_TF;
kvm_x86_ops->set_rflags(vcpu, rflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
is_error_page(work->page))
return;
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
return;
if (!vcpu->arch.mmu.direct_map &&
work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
return;
vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}
static inline u32 kvm_async_pf_next_probe(u32 key)
{
return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}
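/*
 * Example of the probe step above: if ASYNC_PF_PER_VCPU rounds up to,
 * say, 64 slots, then probing from key 63 wraps around to key 0,
 * giving a simple open-addressing scan over the whole gfn table.
 */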
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
u32 key = kvm_async_pf_hash_fn(gfn);
while (vcpu->arch.apf.gfns[key] != ~0)
key = kvm_async_pf_next_probe(key);
vcpu->arch.apf.gfns[key] = gfn;
}
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int i;
u32 key = kvm_async_pf_hash_fn(gfn);
for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
(vcpu->arch.apf.gfns[key] != gfn &&
vcpu->arch.apf.gfns[key] != ~0); i++)
key = kvm_async_pf_next_probe(key);
return key;
}
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
u32 i, j, k;
i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
while (true) {
vcpu->arch.apf.gfns[i] = ~0;
do {
j = kvm_async_pf_next_probe(j);
if (vcpu->arch.apf.gfns[j] == ~0)
return;
k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
/*
* k lies cyclically in ]i,j]
* | i.k.j |
* |....j i.k.| or |.k..j i...|
*/
} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
i = j;
}
}
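/*
 * Worked example of the cyclic test above: suppose we delete slot
 * i = 5 and scan forward. An entry at j = 7 whose home slot is k = 6
 * lies in ]5,7] and must stay where it is, while an entry at j = 7
 * whose home slot is k = 3 (or k = 5) does not, so it is moved back
 * into the hole at i and the scan continues from j.
 */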
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
sizeof(val));
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
(vcpu->arch.apf.send_user_only &&
kvm_x86_ops->get_cpl(vcpu) == 0))
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
kvm_inject_page_fault(vcpu, &fault);
}
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (is_error_page(work->page))
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
return !kvm_event_needs_reinjection(vcpu) &&
kvm_x86_ops->interrupt_allowed(vcpu);
}
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5594_1 |
crossvul-cpp_data_bad_4715_0 | /*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include "inotify.h"
#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int zero;
ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
{ }
};
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
__u32 mask;
/*
 * every watch always accepts its own IN_IGNORED, cares about its
 * children, and receives events when the inode is unmounted
*/
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask;
}
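/*
 * For example, arg = IN_MODIFY | IN_ONESHOT from userspace becomes
 * FS_MODIFY | FS_IN_ONESHOT plus the always-set FS_IN_IGNORED,
 * FS_EVENT_ON_CHILD and FS_UNMOUNT bits.
 */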
static inline u32 inotify_mask_to_arg(__u32 mask)
{
return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&group->notification_mutex);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&group->notification_mutex);
return ret;
}
/*
 * Get an fsnotify_event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * the buffer is not large enough to hold it.
*
* Called with the group->notification_mutex held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
size_t event_size = sizeof(struct inotify_event);
struct fsnotify_event *event;
if (fsnotify_notify_queue_is_empty(group))
return NULL;
event = fsnotify_peek_notify_event(group);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
if (event->name_len)
event_size += roundup(event->name_len + 1, event_size);
if (event_size > count)
return ERR_PTR(-EINVAL);
/* held the notification_mutex the whole time, so this is the
* same event we peeked above */
fsnotify_remove_notify_event(group);
return event;
}
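/*
 * Sizing example for the check above, assuming a 16-byte struct
 * inotify_event: an event carrying the name "a" needs
 * 16 + roundup(1 + 1, 16) = 32 bytes, so a read() with count < 32
 * gets -EINVAL rather than a silently truncated event.
 */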
/*
* Copy an event to user space, returning how much we copied.
*
* We already checked that the event size is smaller than the
* buffer we had in "get_one_event()" above.
*/
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf)
{
struct inotify_event inotify_event;
struct fsnotify_event_private_data *fsn_priv;
struct inotify_event_private_data *priv;
size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (!fsn_priv)
inotify_event.wd = -1;
else {
priv = container_of(fsn_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
inotify_event.wd = priv->wd;
inotify_free_event_priv(fsn_priv);
}
/*
	 * round up event->name_len plus an extra byte for the terminating
	 * '\0' so the result is a multiple of event_size.
*/
if (event->name_len)
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.cookie = event->sync_cookie;
/* send the main event */
if (copy_to_user(buf, &inotify_event, event_size))
return -EFAULT;
buf += event_size;
/*
* fsnotify only stores the pathname, so here we have to send the pathname
* and then pad that pathname out to a multiple of sizeof(inotify_event)
	 * with zeros, using clear_user() to write the padding.
*/
if (name_len) {
unsigned int len_to_zero = name_len - event->name_len;
/* copy the path name */
if (copy_to_user(buf, event->file_name, event->name_len))
return -EFAULT;
buf += event->name_len;
/* fill userspace with 0's */
if (clear_user(buf, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
}
return event_size;
}
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
group = file->private_data;
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
break;
ret = copy_event_to_user(group, kevent, buf);
fsnotify_put_event(kevent);
if (ret < 0)
break;
buf += ret;
count -= ret;
continue;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
schedule();
}
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group);
	/* free this group, matching get was inotify_init->inotify_new_group */
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
struct fsnotify_event *event;
void __user *p;
int ret = -ENOTTY;
size_t send_len = 0;
group = file->private_data;
p = (void __user *) arg;
pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list) {
event = holder->event;
send_len += sizeof(struct inotify_event);
if (event->name_len)
send_len += roundup(event->name_len + 1,
sizeof(struct inotify_event));
}
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
.fasync = inotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
.llseek = noop_llseek,
};
/*
 * inotify_find_inode - resolve a user-given path to a specific inode
*/
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int *last_wd,
struct inotify_inode_mark *i_mark)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
&i_mark->wd);
/* we added the mark to the idr, take a reference */
if (!ret) {
*last_wd = i_mark->wd;
fsnotify_get_mark(&i_mark->fsn_mark);
}
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
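/*
 * Example of the retry above: if another task consumes the layer that
 * idr_pre_get() preallocated before we take idr_lock,
 * idr_get_new_above() fails with -EAGAIN and we simply preallocate
 * and try again.
 */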
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *i_mark;
assert_spin_locked(idr_lock);
i_mark = idr_find(idr, wd);
if (i_mark) {
struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark *i_mark;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
i_mark = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = i_mark->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *found_i_mark = NULL;
int wd;
spin_lock(idr_lock);
wd = i_mark->wd;
/*
* does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't.
*/
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/* Lets look in the idr to see if we find it */
found_i_mark = inotify_idr_find_locked(group, wd);
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove: the idr bookkeeping
	 * is seriously corrupted somewhere.
*/
if (unlikely(found_i_mark != i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
found_i_mark->fsn_mark.i.inode);
goto out;
}
/*
* One ref for being in the idr
* one ref held by the caller trying to kill us
* one ref grabbed by inotify_idr_find
*/
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover from bad refcounting. */
BUG();
}
do_inotify_remove_from_idr(group, i_mark);
out:
/* match the ref taken by inotify_idr_find_locked() */
if (found_i_mark)
fsnotify_put_mark(&found_i_mark->fsn_mark);
i_mark->wd = -1;
spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
struct inotify_inode_mark *i_mark;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
old_mask = fsn_mark->mask;
if (add)
fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
else
fsnotify_set_mark_mask_locked(fsn_mark, mask);
new_mask = fsn_mark->mask;
spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
}
/* return the wd */
ret = i_mark->wd;
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_i_mark))
return -ENOMEM;
fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
tmp_i_mark->fsn_mark.mask = mask;
tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
tmp_i_mark);
if (ret)
goto out_err;
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
/* return the watch descriptor for this new mark */
ret = tmp_i_mark->wd;
out_err:
/* match the ref from fsnotify_init_mark() */
fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
int ret = 0;
retry:
	/* try to update an existing watch with the new arg */
ret = inotify_update_existing_watch(group, inode, arg);
/* no mark present, try to add a new one */
if (ret == -ENOENT)
ret = inotify_new_watch(group, inode, arg);
/*
* inotify_new_watch could race with another thread which did an
* inotify_new_watch between the update_existing and the add watch
* here, go back and try to update an existing mark again.
*/
if (ret == -EEXIST)
goto retry;
return ret;
}
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
	/* inotify_new_group() took a reference to group; we put it when the file is released */
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
	/* drop the group reference taken in inotify_new_group() */
	fsnotify_put_group(group);
	atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
u32, mask)
{
struct fsnotify_group *group;
struct inode *inode;
struct path path;
struct file *filp;
int ret, fput_needed;
unsigned flags = 0;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
ret = inotify_find_inode(pathname, &path, flags);
if (ret)
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
group = filp->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
ret = -EINVAL;
if (unlikely(filp->f_op != &inotify_fops))
goto out;
group = filp->private_data;
ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
ret = 0;
fsnotify_destroy_mark(&i_mark->fsn_mark);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
return ret;
}
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init inotify_user_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
inotify_max_user_watches = 8192;
return 0;
}
module_init(inotify_user_setup);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4715_0 |
crossvul-cpp_data_good_3486_14 | /*
* Performance event support - Freescale Embedded Performance Monitor
*
* Copyright 2008-2009 Paul Mackerras, IBM Corporation.
* Copyright 2010 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg_fsl_emb.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
struct cpu_hw_events {
int n_events;
int disabled;
u8 pmcs_enabled;
struct perf_event *event[MAX_HWEVENTS];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
static struct fsl_emb_pmu *ppmu;
/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
/*
* If interrupts were soft-disabled when a PMU interrupt occurs, treat
* it as an NMI.
*/
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
#ifdef __powerpc64__
return !regs->softe;
#else
return 0;
#endif
}
static void perf_event_interrupt(struct pt_regs *regs);
/*
* Read one performance monitor counter (PMC).
*/
static unsigned long read_pmc(int idx)
{
unsigned long val;
switch (idx) {
case 0:
val = mfpmr(PMRN_PMC0);
break;
case 1:
val = mfpmr(PMRN_PMC1);
break;
case 2:
val = mfpmr(PMRN_PMC2);
break;
case 3:
val = mfpmr(PMRN_PMC3);
break;
default:
printk(KERN_ERR "oops trying to read PMC%d\n", idx);
val = 0;
}
return val;
}
/*
* Write one PMC.
*/
static void write_pmc(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMC0, val);
break;
case 1:
mtpmr(PMRN_PMC1, val);
break;
case 2:
mtpmr(PMRN_PMC2, val);
break;
case 3:
mtpmr(PMRN_PMC3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMC%d\n", idx);
}
isync();
}
/*
* Write one local control A register
*/
static void write_pmlca(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCA0, val);
break;
case 1:
mtpmr(PMRN_PMLCA1, val);
break;
case 2:
mtpmr(PMRN_PMLCA2, val);
break;
case 3:
mtpmr(PMRN_PMLCA3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCA%d\n", idx);
}
isync();
}
/*
* Write one local control B register
*/
static void write_pmlcb(int idx, unsigned long val)
{
switch (idx) {
case 0:
mtpmr(PMRN_PMLCB0, val);
break;
case 1:
mtpmr(PMRN_PMLCB1, val);
break;
case 2:
mtpmr(PMRN_PMLCB2, val);
break;
case 3:
mtpmr(PMRN_PMLCB3, val);
break;
default:
printk(KERN_ERR "oops trying to write PMLCB%d\n", idx);
}
isync();
}
static void fsl_emb_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
if (event->hw.state & PERF_HES_STOPPED)
return;
/*
* Performance monitor interrupts come even when interrupts
* are soft-disabled, as long as interrupts are hard-enabled.
* Therefore we treat them like NMIs.
*/
do {
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
/* The counters are only 32 bits wide */
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
local64_sub(delta, &event->hw.period_left);
}
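/*
 * Wraparound example for the 32-bit delta above: if prev = 0xfffffff0
 * and the counter wrapped to val = 0x00000010, then
 * (val - prev) & 0xffffffff = 0x20, the 32 events that actually
 * occurred.
 */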
/*
* Disable all events to prevent PMU interrupts and to allow
* events to be added or removed.
*/
static void fsl_emb_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled) {
cpuhw->disabled = 1;
/*
* Check if we ever enabled the PMU on this cpu.
*/
if (!cpuhw->pmcs_enabled) {
ppc_enable_pmcs();
cpuhw->pmcs_enabled = 1;
}
if (atomic_read(&num_events)) {
/*
* Set the 'freeze all counters' bit, and disable
* interrupts. The barrier is to make sure the
* mtpmr has been executed and the PMU has frozen
* the events before we return.
*/
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
}
local_irq_restore(flags);
}
/*
* Re-enable all events if disable == 0.
* If we were previously disabled and events were added, then
* put the new config on the PMU.
*/
static void fsl_emb_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuhw;
unsigned long flags;
local_irq_save(flags);
cpuhw = &__get_cpu_var(cpu_hw_events);
if (!cpuhw->disabled)
goto out;
cpuhw->disabled = 0;
ppc_set_pmu_inuse(cpuhw->n_events != 0);
if (cpuhw->n_events > 0) {
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
}
out:
local_irq_restore(flags);
}
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *ctrs[])
{
int n = 0;
struct perf_event *event;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
ctrs[n] = group;
n++;
}
list_for_each_entry(event, &group->sibling_list, group_entry) {
if (!is_software_event(event) &&
event->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
ctrs[n] = event;
n++;
}
}
return n;
}
/* context locked on entry */
static int fsl_emb_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int ret = -EAGAIN;
int num_counters = ppmu->n_counter;
u64 val;
int i;
perf_pmu_disable(event->pmu);
cpuhw = &get_cpu_var(cpu_hw_events);
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_counters = ppmu->n_restricted;
/*
* Allocate counters from top-down, so that restricted-capable
* counters are kept free as long as possible.
*/
for (i = num_counters - 1; i >= 0; i--) {
if (cpuhw->event[i])
continue;
break;
}
if (i < 0)
goto out;
event->hw.idx = i;
cpuhw->event[i] = event;
++cpuhw->n_events;
val = 0;
if (event->hw.sample_period) {
s64 left = local64_read(&event->hw.period_left);
if (left < 0x80000000L)
val = 0x80000000L - left;
}
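	/*
	 * Example: with left = 1000 events remaining in the period, the
	 * counter is programmed to 0x80000000 - 1000, so it sets the
	 * overflow (sign) bit and raises a PMU interrupt after exactly
	 * 1000 more events.
	 */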
local64_set(&event->hw.prev_count, val);
if (!(flags & PERF_EF_START)) {
event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
val = 0;
}
write_pmc(i, val);
perf_event_update_userpage(event);
write_pmlcb(i, event->hw.config >> 32);
write_pmlca(i, event->hw.config_base);
ret = 0;
out:
put_cpu_var(cpu_hw_events);
perf_pmu_enable(event->pmu);
return ret;
}
/* context locked on entry */
static void fsl_emb_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuhw;
int i = event->hw.idx;
perf_pmu_disable(event->pmu);
if (i < 0)
goto out;
fsl_emb_pmu_read(event);
cpuhw = &get_cpu_var(cpu_hw_events);
WARN_ON(event != cpuhw->event[event->hw.idx]);
write_pmlca(i, 0);
write_pmlcb(i, 0);
write_pmc(i, 0);
cpuhw->event[i] = NULL;
event->hw.idx = -1;
/*
* TODO: if at least one restricted event exists, and we
* just freed up a non-restricted-capable counter, and
* there is a restricted-capable counter occupied by
* a non-restricted event, migrate that event to the
* vacated counter.
*/
cpuhw->n_events--;
out:
perf_pmu_enable(event->pmu);
put_cpu_var(cpu_hw_events);
}
static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags)
{
unsigned long flags;
s64 left;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (!(event->hw.state & PERF_HES_STOPPED))
return;
if (ef_flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
local_irq_save(flags);
perf_pmu_disable(event->pmu);
event->hw.state = 0;
left = local64_read(&event->hw.period_left);
write_pmc(event->hw.idx, left);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags)
{
unsigned long flags;
if (event->hw.idx < 0 || !event->hw.sample_period)
return;
if (event->hw.state & PERF_HES_STOPPED)
return;
local_irq_save(flags);
perf_pmu_disable(event->pmu);
fsl_emb_pmu_read(event);
event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
write_pmc(event->hw.idx, 0);
perf_event_update_userpage(event);
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
/*
* Release the PMU if this is the last perf_event.
*/
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
release_pmc_hardware();
mutex_unlock(&pmc_reserve_mutex);
}
}
/*
* Translate a generic cache event_id config to a raw event_id code.
*/
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
unsigned long type, op, result;
int ev;
if (!ppmu->cache_events)
return -EINVAL;
/* unpack config */
type = config & 0xff;
op = (config >> 8) & 0xff;
result = (config >> 16) & 0xff;
if (type >= PERF_COUNT_HW_CACHE_MAX ||
op >= PERF_COUNT_HW_CACHE_OP_MAX ||
result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ev = (*ppmu->cache_events)[type][op][result];
if (ev == 0)
return -EOPNOTSUPP;
if (ev == -1)
return -EINVAL;
*eventp = ev;
return 0;
}
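/*
 * Layout example for the config word unpacked above:
 * config = PERF_COUNT_HW_CACHE_L1D |
 *	    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 * selects the raw event code for L1 data-cache read misses.
 */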
static int fsl_emb_pmu_event_init(struct perf_event *event)
{
u64 ev;
struct perf_event *events[MAX_HWEVENTS];
int n;
int err;
int num_restricted;
int i;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
ev = event->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return -EOPNOTSUPP;
ev = ppmu->generic_events[ev];
break;
case PERF_TYPE_HW_CACHE:
err = hw_perf_cache_event(event->attr.config, &ev);
if (err)
return err;
break;
case PERF_TYPE_RAW:
ev = event->attr.config;
break;
default:
return -ENOENT;
}
event->hw.config = ppmu->xlate_event(ev);
if (!(event->hw.config & FSL_EMB_EVENT_VALID))
return -EINVAL;
/*
* If this is in a group, check if it can go on with all the
* other hardware events in the group. We assume the event
* hasn't been linked into its leader's sibling list at this point.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
ppmu->n_counter - 1, events);
if (n < 0)
return -EINVAL;
}
if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) {
num_restricted = 0;
for (i = 0; i < n; i++) {
if (events[i]->hw.config & FSL_EMB_EVENT_RESTRICTED)
num_restricted++;
}
if (num_restricted >= ppmu->n_restricted)
return -EINVAL;
}
event->hw.idx = -1;
event->hw.config_base = PMLCA_CE | PMLCA_FCM1 |
(u32)((ev << 16) & PMLCA_EVENT_MASK);
if (event->attr.exclude_user)
event->hw.config_base |= PMLCA_FCU;
if (event->attr.exclude_kernel)
event->hw.config_base |= PMLCA_FCS;
if (event->attr.exclude_idle)
return -ENOTSUPP;
event->hw.last_period = event->hw.sample_period;
local64_set(&event->hw.period_left, event->hw.last_period);
/*
* See if we need to reserve the PMU.
* If no events are currently in use, then we have to take a
* mutex to ensure that we don't race with another task doing
* reserve_pmc_hardware or release_pmc_hardware.
*/
err = 0;
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_read(&num_events) == 0 &&
reserve_pmc_hardware(perf_event_interrupt))
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
mtpmr(PMRN_PMGC0, PMGC0_FAC);
isync();
}
event->destroy = hw_perf_event_destroy;
return err;
}
static struct pmu fsl_emb_pmu = {
.pmu_enable = fsl_emb_pmu_enable,
.pmu_disable = fsl_emb_pmu_disable,
.event_init = fsl_emb_pmu_event_init,
.add = fsl_emb_pmu_add,
.del = fsl_emb_pmu_del,
.start = fsl_emb_pmu_start,
.stop = fsl_emb_pmu_stop,
.read = fsl_emb_pmu_read,
};
/*
* A counter has overflowed; update its count and record
* things if requested. Note that interrupts are hard-disabled
* here so there is no possibility of being interrupted.
*/
static void record_and_restart(struct perf_event *event, unsigned long val,
struct pt_regs *regs)
{
u64 period = event->hw.sample_period;
s64 prev, delta, left;
int record = 0;
if (event->hw.state & PERF_HES_STOPPED) {
write_pmc(event->hw.idx, 0);
return;
}
/* we don't have to worry about interrupts here */
prev = local64_read(&event->hw.prev_count);
delta = (val - prev) & 0xfffffffful;
local64_add(delta, &event->count);
/*
* See if the total period for this event has expired,
* and update for the next period.
*/
val = 0;
left = local64_read(&event->hw.period_left) - delta;
if (period) {
if (left <= 0) {
left += period;
if (left <= 0)
left = period;
record = 1;
event->hw.last_period = event->hw.sample_period;
}
if (left < 0x80000000LL)
val = 0x80000000LL - left;
}
write_pmc(event->hw.idx, val);
local64_set(&event->hw.prev_count, val);
local64_set(&event->hw.period_left, left);
perf_event_update_userpage(event);
/*
* Finally record data if requested.
*/
if (record) {
struct perf_sample_data data;
perf_sample_data_init(&data, 0);
data.period = event->hw.last_period;
if (perf_event_overflow(event, &data, regs))
fsl_emb_pmu_stop(event, 0);
}
}
static void perf_event_interrupt(struct pt_regs *regs)
{
int i;
struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
struct perf_event *event;
unsigned long val;
int found = 0;
int nmi;
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();
else
irq_enter();
for (i = 0; i < ppmu->n_counter; ++i) {
event = cpuhw->event[i];
val = read_pmc(i);
if ((int)val < 0) {
if (event) {
/* event has overflowed */
found = 1;
record_and_restart(event, val, regs);
} else {
/*
* Disabled counter is negative,
* reset it just in case.
*/
write_pmc(i, 0);
}
}
}
/* PMM will keep counters frozen until we return from the interrupt. */
mtmsr(mfmsr() | MSR_PMM);
mtpmr(PMRN_PMGC0, PMGC0_PMIE | PMGC0_FCECE);
isync();
if (nmi)
nmi_exit();
else
irq_exit();
}
void hw_perf_event_setup(int cpu)
{
struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
memset(cpuhw, 0, sizeof(*cpuhw));
}
int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu)
{
if (ppmu)
return -EBUSY; /* something's already registered */
ppmu = pmu;
pr_info("%s performance monitor hardware support registered\n",
pmu->name);
perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_14 |
crossvul-cpp_data_bad_3486_4 | /*
* linux/arch/arm/kernel/ptrace.c
*
* By Ross Biro 1/23/92
* edited by Linus Torvalds
* ARM modifications Copyright (C) 2000 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>
#define REG_PC 15
#define REG_PSR 16
/*
 * Signals sent when the child dies are not yet caught;
 * that is handled in exit.c or in signal.c.
*/
#if 0
/*
* Breakpoint SWI instruction: SWI &9F0001
*/
#define BREAKINST_ARM 0xef9f0001
#define BREAKINST_THUMB 0xdf00 /* fill this in later */
#else
/*
* New breakpoints - use an undefined instruction. The ARM architecture
* reference manual guarantees that the following instruction space
* will produce an undefined instruction exception on all CPUs:
*
* ARM: xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
* Thumb: 1101 1110 xxxx xxxx
*/
#define BREAKINST_ARM 0xe7f001f0
#define BREAKINST_THUMB 0xde01
#endif
struct pt_regs_offset {
const char *name;
int offset;
};
#define REG_OFFSET_NAME(r) \
{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
static const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(r0),
REG_OFFSET_NAME(r1),
REG_OFFSET_NAME(r2),
REG_OFFSET_NAME(r3),
REG_OFFSET_NAME(r4),
REG_OFFSET_NAME(r5),
REG_OFFSET_NAME(r6),
REG_OFFSET_NAME(r7),
REG_OFFSET_NAME(r8),
REG_OFFSET_NAME(r9),
REG_OFFSET_NAME(r10),
REG_OFFSET_NAME(fp),
REG_OFFSET_NAME(ip),
REG_OFFSET_NAME(sp),
REG_OFFSET_NAME(lr),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(cpsr),
REG_OFFSET_NAME(ORIG_r0),
REG_OFFSET_END,
};
/**
* regs_query_register_offset() - query register offset from its name
* @name: the name of a register
*
* regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
*/
int regs_query_register_offset(const char *name)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (!strcmp(roff->name, name))
return roff->offset;
return -EINVAL;
}
/**
* regs_query_register_name() - query register name from its offset
* @offset: the offset of a register in struct pt_regs.
*
* regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
*/
const char *regs_query_register_name(unsigned int offset)
{
const struct pt_regs_offset *roff;
for (roff = regoffset_table; roff->name != NULL; roff++)
if (roff->offset == offset)
return roff->name;
return NULL;
}
/**
* regs_within_kernel_stack() - check the address in the stack
* @regs: pt_regs which contains kernel stack pointer.
* @addr: address which is checked.
*
* regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
* If @addr is within the kernel stack, it returns true. If not, returns false.
*/
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}
/**
* regs_get_kernel_stack_nth() - get Nth entry of the stack
* @regs: pt_regs which contains kernel stack pointer.
* @n: stack entry number.
*
* regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
* is specified by @regs. If the @n th entry is NOT in the kernel stack,
* this returns 0.
*/
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
addr += n;
if (regs_within_kernel_stack(regs, (unsigned long)addr))
return *addr;
else
return 0;
}
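/*
 * A minimal standalone sketch of the THREAD_SIZE masking check used by
 * regs_within_kernel_stack() above. THREAD_SIZE is assumed to be 8192
 * (a power of two) purely for illustration; the real value depends on
 * the kernel configuration. Wrapped in #if 0 so it stays out of the
 * build; compile the body as plain userspace C to run it.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_THREAD_SIZE 8192UL	/* assumed, must be a power of two */

/* Two addresses share a stack iff they fall in the same aligned block. */
static int example_same_stack(unsigned long sp, unsigned long addr)
{
	return (addr & ~(EXAMPLE_THREAD_SIZE - 1)) ==
	       (sp & ~(EXAMPLE_THREAD_SIZE - 1));
}

int main(void)
{
	unsigned long sp = 0xc0002f00UL;	/* hypothetical kernel sp */

	printf("%d\n", example_same_stack(sp, 0xc0002008UL));	/* 1: same block */
	printf("%d\n", example_same_stack(sp, 0xc0004000UL));	/* 0: next block */
	return 0;
}
#endif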
/*
 * This routine gets a word off the process's privileged stack.
 * The offset is how far from the base address, as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
*/
static inline long get_user_reg(struct task_struct *task, int offset)
{
return task_pt_regs(task)->uregs[offset];
}
/*
 * This routine puts a word on the process's privileged stack.
 * The offset is how far from the base address, as stored in the THREAD.
 * This routine assumes that all the privileged stacks are in our
 * data space.
*/
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
struct pt_regs newregs, *regs = task_pt_regs(task);
int ret = -EINVAL;
newregs = *regs;
newregs.uregs[offset] = data;
if (valid_user_regs(&newregs)) {
regs->uregs[offset] = data;
ret = 0;
}
return ret;
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
/* Nothing to do. */
}
/*
* Handle hitting a breakpoint.
*/
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void __user *)instruction_pointer(regs);
force_sig_info(SIGTRAP, &info, tsk);
}
static int break_trap(struct pt_regs *regs, unsigned int instr)
{
ptrace_break(current, regs);
return 0;
}
static struct undef_hook arm_break_hook = {
.instr_mask = 0x0fffffff,
.instr_val = 0x07f001f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = 0,
.fn = break_trap,
};
static struct undef_hook thumb_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xde01,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = break_trap,
};
static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
{
unsigned int instr2;
void __user *pc;
/* Check the second half of the instruction. */
pc = (void __user *)(instruction_pointer(regs) + 2);
if (processor_mode(regs) == SVC_MODE) {
instr2 = *(u16 *) pc;
} else {
get_user(instr2, (u16 __user *)pc);
}
if (instr2 == 0xa000) {
ptrace_break(current, regs);
return 0;
} else {
return 1;
}
}
static struct undef_hook thumb2_break_hook = {
.instr_mask = 0xffff,
.instr_val = 0xf7f0,
.cpsr_mask = PSR_T_BIT,
.cpsr_val = PSR_T_BIT,
.fn = thumb2_break_trap,
};
static int __init ptrace_break_init(void)
{
register_undef_hook(&arm_break_hook);
register_undef_hook(&thumb_break_hook);
register_undef_hook(&thumb2_break_hook);
return 0;
}
core_initcall(ptrace_break_init);
/*
 * Read the word at offset "off" in the "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
unsigned long __user *ret)
{
unsigned long tmp;
if (off & 3 || off >= sizeof(struct user))
return -EIO;
tmp = 0;
if (off == PT_TEXT_ADDR)
tmp = tsk->mm->start_code;
else if (off == PT_DATA_ADDR)
tmp = tsk->mm->start_data;
else if (off == PT_TEXT_END_ADDR)
tmp = tsk->mm->end_code;
else if (off < sizeof(struct pt_regs))
tmp = get_user_reg(tsk, off >> 2);
return put_user(tmp, ret);
}
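/*
 * A minimal standalone sketch of the offset validation performed by
 * ptrace_read_user()/ptrace_write_user() above: offsets must be
 * word-aligned and in range, and a register offset maps to a register
 * index via off >> 2. The struct sizes are assumed values chosen for
 * the illustration only. Guarded out of the build; compile as plain
 * userspace C.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_PT_REGS_SIZE	(18 * 4)	/* assumed: 18 saved words */
#define EXAMPLE_USER_SIZE	296		/* assumed sizeof(struct user) */

static int example_off_to_reg(unsigned long off)
{
	if (off & 3 || off >= EXAMPLE_USER_SIZE)
		return -1;		/* -EIO in the kernel */
	if (off >= EXAMPLE_PT_REGS_SIZE)
		return -2;		/* valid offset but not a register */
	return (int)(off >> 2);		/* register index */
}

int main(void)
{
	printf("%d\n", example_off_to_reg(0));	/* 0  -> r0 */
	printf("%d\n", example_off_to_reg(60));	/* 15 -> pc */
	printf("%d\n", example_off_to_reg(61));	/* -1: unaligned */
	return 0;
}
#endif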
/*
* Write the word at offset "off" into "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
unsigned long val)
{
if (off & 3 || off >= sizeof(struct user))
return -EIO;
if (off >= sizeof(struct pt_regs))
return 0;
return put_user_reg(tsk, off >> 2, val);
}
#ifdef CONFIG_IWMMXT
/*
* Get the child iWMMXt state.
*/
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -ENODATA;
iwmmxt_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
? -EFAULT : 0;
}
/*
* Set the child iWMMXt state.
*/
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
return -EACCES;
iwmmxt_task_release(thread); /* force a reload */
return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_CRUNCH
/*
* Get the child Crunch state.
*/
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_disable(thread); /* force it to ram */
return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
? -EFAULT : 0;
}
/*
* Set the child Crunch state.
*/
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
struct thread_info *thread = task_thread_info(tsk);
crunch_task_release(thread); /* force a reload */
return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
? -EFAULT : 0;
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
* Convert a virtual register number into an index for a thread_info
* breakpoint array. Breakpoints are identified using positive numbers
* whilst watchpoints are negative. The registers are laid out as pairs
* of (address, control), each pair mapping to a unique hw_breakpoint struct.
* Register 0 is reserved for describing resource information.
*/
static int ptrace_hbp_num_to_idx(long num)
{
if (num < 0)
num = (ARM_MAX_BRP << 1) - num;
return (num - 1) >> 1;
}
/*
* Returns the virtual register number for the address of the
* breakpoint at index idx.
*/
static long ptrace_hbp_idx_to_num(int idx)
{
long mid = ARM_MAX_BRP << 1;
long num = (idx << 1) + 1;
return num > mid ? mid - num : num;
}
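/*
 * A minimal standalone sketch of the virtual register numbering used by
 * ptrace_hbp_num_to_idx()/ptrace_hbp_idx_to_num() above: breakpoints
 * use positive numbers, watchpoints negative ones, and each slot index
 * covers an (address, control) pair. ARM_MAX_BRP is assumed to be 16
 * purely for illustration. Guarded out of the build; compile as plain
 * userspace C.
 */
#if 0
#include <stdio.h>

#define EXAMPLE_MAX_BRP 16	/* assumed breakpoint slot count */

static int example_num_to_idx(long num)
{
	if (num < 0)
		num = (EXAMPLE_MAX_BRP << 1) - num;
	return (int)((num - 1) >> 1);
}

static long example_idx_to_num(int idx)
{
	long mid = EXAMPLE_MAX_BRP << 1;
	long num = ((long)idx << 1) + 1;

	return num > mid ? mid - num : num;
}

int main(void)
{
	/* breakpoint 1 -> slot 0, watchpoint -1 -> slot EXAMPLE_MAX_BRP */
	printf("%d %d\n", example_num_to_idx(1), example_num_to_idx(-1));
	/* and the round trip recovers the original register numbers */
	printf("%ld %ld\n", example_idx_to_num(0),
	       example_idx_to_num(EXAMPLE_MAX_BRP));
	return 0;
}
#endif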
/*
* Handle hitting a HW-breakpoint.
*/
static void ptrace_hbptriggered(struct perf_event *bp, int unused,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
long num;
int i;
siginfo_t info;
for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
if (current->thread.debug.hbp[i] == bp)
break;
num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);
info.si_signo = SIGTRAP;
info.si_errno = (int)num;
info.si_code = TRAP_HWBKPT;
info.si_addr = (void __user *)(bkpt->trigger);
force_sig_info(SIGTRAP, &info, current);
}
/*
* Set ptrace breakpoint pointers to zero for this task.
* This is required in order to prevent child processes from unregistering
* breakpoints held by their parent.
*/
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}
/*
* Unregister breakpoints from this task and reset the pointers in
* the thread_struct.
*/
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
int i;
struct thread_struct *t = &tsk->thread;
for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
if (t->debug.hbp[i]) {
unregister_hw_breakpoint(t->debug.hbp[i]);
t->debug.hbp[i] = NULL;
}
}
}
static u32 ptrace_get_hbp_resource_info(void)
{
u8 num_brps, num_wrps, debug_arch, wp_len;
u32 reg = 0;
num_brps = hw_breakpoint_slots(TYPE_INST);
num_wrps = hw_breakpoint_slots(TYPE_DATA);
debug_arch = arch_get_debug_arch();
wp_len = arch_get_max_wp_len();
reg |= debug_arch;
reg <<= 8;
reg |= wp_len;
reg <<= 8;
reg |= num_wrps;
reg <<= 8;
reg |= num_brps;
return reg;
}
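/*
 * A minimal standalone sketch of the byte layout produced by
 * ptrace_get_hbp_resource_info() above: debug_arch lands in bits
 * [31:24], the maximum watchpoint length in [23:16], the watchpoint
 * count in [15:8] and the breakpoint count in [7:0]. The field values
 * below are made up for the demonstration. Compile as plain userspace C.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int debug_arch = 0x03, wp_len = 4, num_wrps = 1, num_brps = 5;
	unsigned int reg = 0;

	reg |= debug_arch;
	reg <<= 8;
	reg |= wp_len;
	reg <<= 8;
	reg |= num_wrps;
	reg <<= 8;
	reg |= num_brps;

	printf("packed: 0x%08x\n", reg);	/* 0x03040105 */
	printf("brps:   %u\n", reg & 0xff);
	printf("wrps:   %u\n", (reg >> 8) & 0xff);
	printf("wp_len: %u\n", (reg >> 16) & 0xff);
	printf("arch:   0x%02x\n", reg >> 24);
	return 0;
}
#endif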
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
struct perf_event_attr attr;
ptrace_breakpoint_init(&attr);
/* Initialise fields to sane defaults. */
attr.bp_addr = 0;
attr.bp_len = HW_BREAKPOINT_LEN_4;
attr.bp_type = type;
attr.disabled = 1;
return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
}
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
u32 reg;
int idx, ret = 0;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl arch_ctrl;
if (num == 0) {
reg = ptrace_get_hbp_resource_info();
} else {
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
reg = 0;
goto put;
}
arch_ctrl = counter_arch_bp(bp)->ctrl;
/*
* Fix up the len because we may have adjusted it
* to compensate for an unaligned address.
*/
while (!(arch_ctrl.len & 0x1))
arch_ctrl.len >>= 1;
if (num & 0x1)
reg = bp->attr.bp_addr;
else
reg = encode_ctrl_reg(arch_ctrl);
}
put:
if (put_user(reg, data))
ret = -EFAULT;
out:
return ret;
}
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
unsigned long __user *data)
{
int idx, gen_len, gen_type, implied_type, ret = 0;
u32 user_val;
struct perf_event *bp;
struct arch_hw_breakpoint_ctrl ctrl;
struct perf_event_attr attr;
if (num == 0)
goto out;
else if (num < 0)
implied_type = HW_BREAKPOINT_RW;
else
implied_type = HW_BREAKPOINT_X;
idx = ptrace_hbp_num_to_idx(num);
if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
ret = -EINVAL;
goto out;
}
if (get_user(user_val, data)) {
ret = -EFAULT;
goto out;
}
bp = tsk->thread.debug.hbp[idx];
if (!bp) {
bp = ptrace_hbp_create(tsk, implied_type);
if (IS_ERR(bp)) {
ret = PTR_ERR(bp);
goto out;
}
tsk->thread.debug.hbp[idx] = bp;
}
attr = bp->attr;
if (num & 0x1) {
/* Address */
attr.bp_addr = user_val;
} else {
/* Control */
decode_ctrl_reg(user_val, &ctrl);
ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
if (ret)
goto out;
if ((gen_type & implied_type) != gen_type) {
ret = -EINVAL;
goto out;
}
attr.bp_len = gen_len;
attr.bp_type = gen_type;
attr.disabled = !ctrl.enabled;
}
ret = modify_user_hw_breakpoint(bp, &attr);
out:
return ret;
}
#endif
/* regset get/set implementations */
static int gpr_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs,
0, sizeof(*regs));
}
static int gpr_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
0, sizeof(newregs));
if (ret)
return ret;
if (!valid_user_regs(&newregs))
return -EINVAL;
*task_pt_regs(target) = newregs;
return 0;
}
static int fpa_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&task_thread_info(target)->fpstate,
0, sizeof(struct user_fp));
}
static int fpa_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct thread_info *thread = task_thread_info(target);
thread->used_cp[1] = thread->used_cp[2] = 1;
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&thread->fpstate,
0, sizeof(struct user_fp));
}
#ifdef CONFIG_VFP
/*
* VFP register get/set implementations.
*
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
* These are transferred to/from the real registers in the task's
* vfp_hard_struct. The number of registers depends on the kernel
* configuration.
*
* 16 or 0 fake VFP registers (d16-d31 or empty)
* i.e., the user_vfp structure has space for 32 registers even if
* the kernel doesn't have them all.
*
* vfp_get() reads this chunk as zero where applicable
* vfp_set() ignores this chunk
*
* 1 word for the FPSCR
*
* The bounds-checking logic built into user_regset_copyout and friends
* means that we can make a simple sequence of calls to map the relevant data
* to/from the specified slice of the user regset structure.
*/
static int vfp_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
vfp_sync_hwstate(thread);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(vfp->fpregs));
if (ret)
return ret;
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(vfp->fpregs),
user_fpscr_offset);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&vfp->fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(vfp->fpscr));
}
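/*
 * A minimal standalone sketch of the layout that vfp_get()/vfp_set()
 * walk above: 32 double-width registers followed by a single FPSCR
 * word. This mirror of the structure is an assumption made for the
 * illustration; the authoritative definition lives in the uapi
 * headers. Guarded out of the build; compile as plain userspace C.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

struct example_user_vfp {
	unsigned long long fpregs[32];	/* d0-d31, real or zero-filled */
	unsigned long fpscr;		/* single word dangling at the end */
};

int main(void)
{
	/* the copyout calls above cover [0, 256) and then the FPSCR */
	printf("fpregs at %zu, fpscr at %zu\n",
	       offsetof(struct example_user_vfp, fpregs),
	       offsetof(struct example_user_vfp, fpscr));
	return 0;
}
#endif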
/*
* For vfp_set() a read-modify-write is done on the VFP registers,
* in order to avoid writing back a half-modified set of registers on
* failure.
*/
static int vfp_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct thread_info *thread = task_thread_info(target);
struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpregs,
user_fpregs_offset,
user_fpregs_offset + sizeof(new_vfp.fpregs));
if (ret)
return ret;
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
user_fpregs_offset + sizeof(new_vfp.fpregs),
user_fpscr_offset);
if (ret)
return ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&new_vfp.fpscr,
user_fpscr_offset,
user_fpscr_offset + sizeof(new_vfp.fpscr));
if (ret)
return ret;
vfp_sync_hwstate(thread);
thread->vfpstate.hard = new_vfp;
vfp_flush_hwstate(thread);
return 0;
}
#endif /* CONFIG_VFP */
enum arm_regset {
REGSET_GPR,
REGSET_FPR,
#ifdef CONFIG_VFP
REGSET_VFP,
#endif
};
static const struct user_regset arm_regsets[] = {
[REGSET_GPR] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
.set = gpr_set
},
[REGSET_FPR] = {
/*
* For the FPA regs in fpstate, the real fields are a mixture
* of sizes, so pretend that the registers are word-sized:
*/
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fp) / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = fpa_get,
.set = fpa_set
},
#ifdef CONFIG_VFP
[REGSET_VFP] = {
/*
* Pretend that the VFP regs are word-sized, since the FPSCR is
* a single word dangling at the end of struct user_vfp:
*/
.core_note_type = NT_ARM_VFP,
.n = ARM_VFPREGS_SIZE / sizeof(u32),
.size = sizeof(u32),
.align = sizeof(u32),
.get = vfp_get,
.set = vfp_set
},
#endif /* CONFIG_VFP */
};
static const struct user_regset_view user_arm_view = {
.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_arm_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
case PTRACE_PEEKUSR:
ret = ptrace_read_user(child, addr, datap);
break;
case PTRACE_POKEUSR:
ret = ptrace_write_user(child, addr, data);
break;
case PTRACE_GETREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_SETREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_GPR,
0, sizeof(struct pt_regs),
datap);
break;
case PTRACE_GETFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
case PTRACE_SETFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_FPR,
0, sizeof(union fp_state),
datap);
break;
#ifdef CONFIG_IWMMXT
case PTRACE_GETWMMXREGS:
ret = ptrace_getwmmxregs(child, datap);
break;
case PTRACE_SETWMMXREGS:
ret = ptrace_setwmmxregs(child, datap);
break;
#endif
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value,
datap);
break;
case PTRACE_SET_SYSCALL:
task_thread_info(child)->syscall = data;
ret = 0;
break;
#ifdef CONFIG_CRUNCH
case PTRACE_GETCRUNCHREGS:
ret = ptrace_getcrunchregs(child, datap);
break;
case PTRACE_SETCRUNCHREGS:
ret = ptrace_setcrunchregs(child, datap);
break;
#endif
#ifdef CONFIG_VFP
case PTRACE_GETVFPREGS:
ret = copy_regset_to_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
case PTRACE_SETVFPREGS:
ret = copy_regset_from_user(child,
&user_arm_view, REGSET_VFP,
0, ARM_VFPREGS_SIZE,
datap);
break;
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
case PTRACE_GETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_gethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
case PTRACE_SETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_sethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
unsigned long ip;
if (!test_thread_flag(TIF_SYSCALL_TRACE))
return scno;
if (!(current->ptrace & PT_PTRACED))
return scno;
/*
* Save IP. IP is used to denote syscall entry/exit:
* IP = 0 -> entry, = 1 -> exit
*/
ip = regs->ARM_ip;
regs->ARM_ip = why;
current_thread_info()->syscall = scno;
/* the 0x80 provides a way for the tracing parent to distinguish
between a syscall stop and SIGTRAP delivery */
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
regs->ARM_ip = ip;
return current_thread_info()->syscall;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_4 |
crossvul-cpp_data_bad_3486_16 | /*
* PowerPC version
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Derived from "arch/i386/mm/fault.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* Modified by Cort Dougan and Paul Mackerras.
*
* Modified for PPC64 by Dave Engebretsen (engebret@ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/perf_event.h>
#include <linux/magic.h>
#include <asm/firmware.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/siginfo.h>
#include <mm/mmu_decl.h>
#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
if (!user_mode(regs)) {
preempt_disable();
if (kprobe_running() && kprobe_fault_handler(regs, 11))
ret = 1;
preempt_enable();
}
return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs)
{
return 0;
}
#endif
/*
* Check whether the instruction at regs->nip is a store using
* an update addressing form which will update r1.
*/
static int store_updates_sp(struct pt_regs *regs)
{
unsigned int inst;
if (get_user(inst, (unsigned int __user *)regs->nip))
return 0;
/* check for 1 in the rA field */
if (((inst >> 16) & 0x1f) != 1)
return 0;
/* check major opcode */
switch (inst >> 26) {
case 37: /* stwu */
case 39: /* stbu */
case 45: /* sthu */
case 53: /* stfsu */
case 55: /* stfdu */
return 1;
case 62: /* std or stdu */
return (inst & 3) == 1;
case 31:
/* check minor opcode */
switch ((inst >> 1) & 0x3ff) {
case 181: /* stdux */
case 183: /* stwux */
case 247: /* stbux */
case 439: /* sthux */
case 695: /* stfsux */
case 759: /* stfdux */
return 1;
}
}
return 0;
}
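/*
 * A minimal standalone sketch of the decode performed by
 * store_updates_sp() above, applied to a hand-encoded
 * "stwu r1,-16(r1)" (primary opcode 37, rS = 1, rA = 1, d = -16).
 * The encoding helper and the stwu-only check are simplifications
 * written for this example. Guarded out of the build; compile as
 * plain userspace C.
 */
#if 0
#include <stdio.h>

static unsigned int example_encode_stwu(unsigned int rs, unsigned int ra,
					int d)
{
	return (37u << 26) | (rs << 21) | (ra << 16) |
	       ((unsigned int)d & 0xffff);
}

static int example_updates_sp(unsigned int inst)
{
	if (((inst >> 16) & 0x1f) != 1)	/* rA must be r1 */
		return 0;
	return (inst >> 26) == 37;	/* stwu only, for brevity */
}

int main(void)
{
	unsigned int inst = example_encode_stwu(1, 1, -16);

	/* prints inst = 0x9421fff0, updates sp: 1 */
	printf("inst = 0x%08x, updates sp: %d\n", inst,
	       example_updates_sp(inst));
	return 0;
}
#endif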
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
* for a data fault, SRR1 for an instruction fault. For 400-family processors
* the error_code parameter is ESR for a data fault, 0 for an instruction
* fault.
* For 64-bit processors, the error_code parameter is
* - DSISR for a non-SLB data access fault,
* - SRR1 & 0x08000000 for a non-SLB instruction access fault
 * - 0 for any SLB fault.
*
* The return value is 0 if the fault was handled, or the signal
* number if this is a kernel fault that can't be handled here.
*/
int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code)
{
struct vm_area_struct * vma;
struct mm_struct *mm = current->mm;
siginfo_t info;
int code = SEGV_MAPERR;
int is_write = 0, ret;
int trap = TRAP(regs);
int is_exec = trap == 0x400;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/*
* Fortunately the bit assignments in SRR1 for an instruction
* fault and DSISR for a data fault are mostly the same for the
* bits we are interested in. But there are some bits which
* indicate errors in DSISR but can validly be set in SRR1.
*/
if (trap == 0x400)
error_code &= 0x48200000;
else
is_write = error_code & DSISR_ISSTORE;
#else
is_write = error_code & ESR_DST;
#endif /* CONFIG_4xx || CONFIG_BOOKE */
if (notify_page_fault(regs))
return 0;
if (unlikely(debugger_fault_handler(regs)))
return 0;
/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE))
return SIGSEGV;
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
defined(CONFIG_PPC_BOOK3S_64))
if (error_code & DSISR_DABRMATCH) {
/* DABR match */
do_dabr(regs, address, error_code);
return 0;
}
#endif
if (in_atomic() || mm == NULL) {
if (!user_mode(regs))
return SIGSEGV;
/* in_atomic() in user mode is really bad,
as is current->mm == NULL. */
printk(KERN_EMERG "Page fault in user mode with "
"in_atomic() = %d mm = %p\n", in_atomic(), mm);
printk(KERN_EMERG "NIP = %lx MSR = %lx\n",
regs->nip, regs->msr);
die("Weird page fault", regs, SIGSEGV);
}
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
/* When running in the kernel we expect faults to occur only to
* addresses in user space. All other faults represent errors in the
* kernel and should generate an OOPS. Unfortunately, in the case of an
* erroneous fault occurring in a code path which already holds mmap_sem
* we will deadlock attempting to validate the fault against the
* address space. Luckily the kernel only validly references user
* space from well defined areas of code, which are listed in the
* exceptions table.
*
* As the vast majority of faults will be valid we will only perform
* the source reference check when there is a possibility of a deadlock.
* Attempt to lock the address space, if we cannot we then validate the
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
if (!user_mode(regs) && !search_exception_tables(regs->nip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
/*
* N.B. The POWER/Open ABI allows programs to access up to
* 288 bytes below the stack pointer.
* The kernel signal delivery code writes up to about 1.5kB
* below the stack pointer (r1) before decrementing it.
* The exec code can write slightly over 640kB to the stack
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
if (address + 0x100000 < vma->vm_end) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
if (uregs == NULL)
goto bad_area;
/*
* A user-mode access to an address a long way below
* the stack pointer is only valid if the instruction
* is one which would update the stack pointer to the
* address accessed if the instruction completed,
* i.e. either stwu rs,n(r1) or stwux rs,r1,rb
* (or the byte, halfword, float or double forms).
*
* If we don't check this then any write to the area
* between the last mapped region and the stack will
* expand the stack rather than segfaulting.
*/
if (address + 2048 < uregs->gpr[1]
&& (!user_mode(regs) || !store_updates_sp(regs)))
goto bad_area;
}
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
#if defined(CONFIG_6xx)
if (error_code & 0x95700000)
/* an error such as lwarx to I/O controller space,
address matching DABR, eciwx, etc. */
goto bad_area;
#endif /* CONFIG_6xx */
#if defined(CONFIG_8xx)
	/* The 8xx sometimes needs to load invalid/non-present TLBs.
	 * These must be invalidated separately, as the Linux mm code doesn't.
*/
if (error_code & 0x40000000) /* no translation? */
_tlbil_va(address, 0, 0, 0);
/* The MPC8xx seems to always set 0x80000000, which is
* "undefined". Of those that can be set, this is the only
* one which seems bad.
*/
if (error_code & 0x10000000)
/* Guarded storage error. */
goto bad_area;
#endif /* CONFIG_8xx */
if (is_exec) {
#ifdef CONFIG_PPC_STD_MMU
/* Protection fault on exec go straight to failure on
* Hash based MMUs as they either don't support per-page
* execute permission, or if they do, it's handled already
* at the hash level. This test would probably have to
* be removed if we change the way this works to make hash
* processors use the same I/D cache coherency mechanism
* as embedded.
*/
if (error_code & DSISR_PROTFAULT)
goto bad_area;
#endif /* CONFIG_PPC_STD_MMU */
/*
* Allow execution from readable areas if the MMU does not
* provide separate controls over reading and executing.
*
* Note: That code used to not be enabled for 4xx/BookE.
* It is now as I/D cache coherency for these is done at
* set_pte_at() time and I see no reason why the test
* below wouldn't be valid on those processors. This -may-
* break programs compiled with a really old ABI though.
*/
if (!(vma->vm_flags & VM_EXEC) &&
(cpu_has_feature(CPU_FTR_NOEXECUTE) ||
!(vma->vm_flags & (VM_READ | VM_WRITE))))
goto bad_area;
/* a write */
} else if (is_write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
/* a read */
} else {
/* protection fault */
if (error_code & 0x08000000)
goto bad_area;
if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0);
if (unlikely(ret & VM_FAULT_ERROR)) {
if (ret & VM_FAULT_OOM)
goto out_of_memory;
else if (ret & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (ret & VM_FAULT_MAJOR) {
current->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
regs, address);
#ifdef CONFIG_PPC_SMLPAR
if (firmware_has_feature(FW_FEATURE_CMO)) {
preempt_disable();
get_lppaca()->page_ins += (1 << PAGE_FACTOR);
preempt_enable();
}
#endif
} else {
current->min_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
regs, address);
}
up_read(&mm->mmap_sem);
return 0;
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses cause a SIGSEGV */
if (user_mode(regs)) {
_exception(SIGSEGV, regs, code, address);
return 0;
}
if (is_exec && (error_code & DSISR_PROTFAULT)
&& printk_ratelimit())
printk(KERN_CRIT "kernel tried to execute NX-protected"
" page (%lx) - exploit attempt? (uid: %d)\n",
address, current_uid());
return SIGSEGV;
/*
* We ran out of memory, or some other thing happened to us that made
* us unable to handle the page fault gracefully.
*/
out_of_memory:
up_read(&mm->mmap_sem);
if (!user_mode(regs))
return SIGKILL;
pagefault_out_of_memory();
return 0;
do_sigbus:
up_read(&mm->mmap_sem);
if (user_mode(regs)) {
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
return 0;
}
return SIGBUS;
}
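/*
 * A minimal standalone sketch of the stack-expansion heuristic applied
 * in do_page_fault() above: a fault more than 1MB below the end of the
 * stack VMA is only honoured if it lands within 2048 bytes of the user
 * stack pointer or comes from a store-with-update instruction. This is
 * simplified (it ignores the user_mode() check) and the addresses are
 * made up for the demonstration. Compile as plain userspace C.
 */
#if 0
#include <stdio.h>

static int example_allow_expand(unsigned long address, unsigned long vm_end,
				unsigned long usp, int store_updates_sp)
{
	if (address + 0x100000 < vm_end &&	/* far below the stack VMA */
	    address + 2048 < usp &&		/* far below the user sp */
	    !store_updates_sp)			/* and not stwu/stwux etc. */
		return 0;			/* -> bad_area */
	return 1;				/* -> expand_stack() */
}

int main(void)
{
	unsigned long vm_end = 0xb0000000UL, usp = 0xafff0000UL;

	printf("%d\n", example_allow_expand(0xaffef000UL, vm_end, usp, 0)); /* 1 */
	printf("%d\n", example_allow_expand(0xa0000000UL, vm_end, usp, 0)); /* 0 */
	printf("%d\n", example_allow_expand(0xa0000000UL, vm_end, usp, 1)); /* 1 */
	return 0;
}
#endif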
/*
* bad_page_fault is called when we have a bad access from the kernel.
* It is called from the DSI and ISI handlers in head.S and from some
* of the procedures in traps.c.
*/
void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
const struct exception_table_entry *entry;
unsigned long *stackend;
/* Are we prepared to handle this fault? */
if ((entry = search_exception_tables(regs->nip)) != NULL) {
regs->nip = entry->fixup;
return;
}
/* kernel has accessed a bad area */
switch (regs->trap) {
case 0x300:
case 0x380:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"data at address 0x%08lx\n", regs->dar);
break;
case 0x400:
case 0x480:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"instruction fetch\n");
break;
default:
printk(KERN_ALERT "Unable to handle kernel paging request for "
"unknown fault\n");
break;
}
printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n",
regs->nip);
stackend = end_of_stack(current);
if (current != &init_task && *stackend != STACK_END_MAGIC)
printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
die("Kernel access of bad area", regs, sig);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_16 |
crossvul-cpp_data_good_4715_0 | /*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* inotify was largely rewriten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include "inotify.h"
#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int zero;
ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
{ }
};
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
__u32 mask;
/*
 * every watch should accept its own IN_IGNORED, care about children,
 * and receive events when the inode is unmounted
*/
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
IN_Q_OVERFLOW);
}
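/*
 * A minimal standalone sketch of the mask translation done by
 * inotify_arg_to_mask()/inotify_mask_to_arg() above: kernel-internal
 * bits are ORed in on the way in and stripped again on the way out.
 * The flag values here are stand-ins chosen for the example, not the
 * real IN_*/FS_* constants. Guarded out of the build; compile as
 * plain userspace C.
 */
#if 0
#include <stdio.h>

#define EX_IN_ALL_EVENTS 0x00000fffu	/* stand-in */
#define EX_IN_ONESHOT    0x80000000u	/* stand-in */
#define EX_FS_INTERNAL   0x70000000u	/* stand-in for IGNORED etc. */

static unsigned int ex_arg_to_mask(unsigned int arg)
{
	/* internal bits every watch gets, plus the user-requested events */
	return EX_FS_INTERNAL | (arg & (EX_IN_ALL_EVENTS | EX_IN_ONESHOT));
}

static unsigned int ex_mask_to_arg(unsigned int mask)
{
	return mask & EX_IN_ALL_EVENTS;	/* report only user-visible bits */
}

int main(void)
{
	unsigned int arg = 0x003;	/* watch two events */
	unsigned int mask = ex_arg_to_mask(arg);

	/* prints mask 0x70000003 -> arg 0x00000003 */
	printf("mask 0x%08x -> arg 0x%08x\n", mask, ex_mask_to_arg(mask));
	return 0;
}
#endif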
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&group->notification_mutex);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&group->notification_mutex);
return ret;
}
/*
 * Get a queued fsnotify event if one exists and is small
 * enough to fit in "count". Return an error pointer if
 * the buffer is not large enough.
*
* Called with the group->notification_mutex held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
size_t event_size = sizeof(struct inotify_event);
struct fsnotify_event *event;
if (fsnotify_notify_queue_is_empty(group))
return NULL;
event = fsnotify_peek_notify_event(group);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
if (event->name_len)
event_size += roundup(event->name_len + 1, event_size);
if (event_size > count)
return ERR_PTR(-EINVAL);
/* held the notification_mutex the whole time, so this is the
* same event we peeked above */
fsnotify_remove_notify_event(group);
return event;
}
/*
* Copy an event to user space, returning how much we copied.
*
* We already checked that the event size is smaller than the
* buffer we had in "get_one_event()" above.
*/
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf)
{
struct inotify_event inotify_event;
struct fsnotify_event_private_data *fsn_priv;
struct inotify_event_private_data *priv;
size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (!fsn_priv)
inotify_event.wd = -1;
else {
priv = container_of(fsn_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
inotify_event.wd = priv->wd;
inotify_free_event_priv(fsn_priv);
}
/*
* round up event->name_len so it is a multiple of event_size
* plus an extra byte for the terminating '\0'.
*/
if (event->name_len)
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.cookie = event->sync_cookie;
/* send the main event */
if (copy_to_user(buf, &inotify_event, event_size))
return -EFAULT;
buf += event_size;
/*
* fsnotify only stores the pathname, so here we have to send the pathname
* and then pad that pathname out to a multiple of sizeof(inotify_event)
 * with zeros; the zeros come from the clear_user() call below.
*/
if (name_len) {
unsigned int len_to_zero = name_len - event->name_len;
/* copy the path name */
if (copy_to_user(buf, event->file_name, event->name_len))
return -EFAULT;
buf += event->name_len;
/* fill userspace with 0's */
if (clear_user(buf, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
}
return event_size;
}
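/*
 * A minimal standalone sketch of the name padding computed in
 * copy_event_to_user() above: the name plus its terminating '\0' is
 * rounded up to a multiple of sizeof(struct inotify_event), assumed
 * to be 16 bytes (wd, mask, cookie, len) for this illustration.
 * Guarded out of the build; compile as plain userspace C.
 */
#if 0
#include <stdio.h>

#define EX_EVENT_SIZE 16	/* assumed sizeof(struct inotify_event) */
#define ex_roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	size_t name_lens[] = { 0, 1, 15, 16, 17 };
	size_t i;

	for (i = 0; i < sizeof(name_lens) / sizeof(name_lens[0]); i++) {
		size_t n = name_lens[i];
		size_t padded = n ? ex_roundup(n + 1, EX_EVENT_SIZE) : 0;

		printf("name_len %2zu -> padded %2zu, total %2zu\n",
		       n, padded, EX_EVENT_SIZE + padded);
	}
	return 0;
}
#endif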
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
group = file->private_data;
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
break;
ret = copy_event_to_user(group, kevent, buf);
fsnotify_put_event(kevent);
if (ret < 0)
break;
buf += ret;
count -= ret;
continue;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
schedule();
}
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group);
	/* free this group; this matches the get taken when inotify_init allocated it */
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
struct fsnotify_event *event;
void __user *p;
int ret = -ENOTTY;
size_t send_len = 0;
group = file->private_data;
p = (void __user *) arg;
pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list) {
event = holder->event;
send_len += sizeof(struct inotify_event);
if (event->name_len)
send_len += roundup(event->name_len + 1,
sizeof(struct inotify_event));
}
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
.fasync = inotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
.llseek = noop_llseek,
};
/*
* find_inode - resolve a user-given path to a specific inode
*/
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int *last_wd,
struct inotify_inode_mark *i_mark)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
&i_mark->wd);
/* we added the mark to the idr, take a reference */
if (!ret) {
*last_wd = i_mark->wd;
fsnotify_get_mark(&i_mark->fsn_mark);
}
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *i_mark;
assert_spin_locked(idr_lock);
i_mark = idr_find(idr, wd);
if (i_mark) {
struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark *i_mark;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
i_mark = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = i_mark->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *found_i_mark = NULL;
int wd;
spin_lock(idr_lock);
wd = i_mark->wd;
/*
* does this i_mark think it is in the idr? we shouldn't get called
* if it wasn't....
*/
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/* Lets look in the idr to see if we find it */
found_i_mark = inotify_idr_find_locked(group, wd);
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/*
	 * We found a mark in the idr at the right wd, but it's
* not the mark we were told to remove. eparis seriously
* fucked up somewhere.
*/
if (unlikely(found_i_mark != i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
found_i_mark->fsn_mark.i.inode);
goto out;
}
/*
* One ref for being in the idr
* one ref held by the caller trying to kill us
* one ref grabbed by inotify_idr_find
*/
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
/* we can't really recover with bad ref cnting.. */
BUG();
}
do_inotify_remove_from_idr(group, i_mark);
out:
/* match the ref taken by inotify_idr_find_locked() */
if (found_i_mark)
fsnotify_put_mark(&found_i_mark->fsn_mark);
i_mark->wd = -1;
spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
struct inotify_inode_mark *i_mark;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
old_mask = fsn_mark->mask;
if (add)
fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
else
fsnotify_set_mark_mask_locked(fsn_mark, mask);
new_mask = fsn_mark->mask;
spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
}
/* return the wd */
ret = i_mark->wd;
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_i_mark))
return -ENOMEM;
fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
tmp_i_mark->fsn_mark.mask = mask;
tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
tmp_i_mark);
if (ret)
goto out_err;
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
/* return the watch descriptor for this new mark */
ret = tmp_i_mark->wd;
out_err:
/* match the ref from fsnotify_init_mark() */
fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
int ret = 0;
retry:
	/* try to update an existing watch with the new arg */
ret = inotify_update_existing_watch(group, inode, arg);
/* no mark present, try to add a new one */
if (ret == -ENOENT)
ret = inotify_new_watch(group, inode, arg);
/*
	 * inotify_new_watch() could race with another thread which did an
	 * inotify_new_watch() between our update_existing and new_watch
	 * calls; in that case, go back and try to update the existing mark.
*/
if (ret == -EEXIST)
goto retry;
return ret;
}
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
	/* the group allocation took a reference; we put it when we kill the file in the end */
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
u32, mask)
{
struct fsnotify_group *group;
struct inode *inode;
struct path path;
struct file *filp;
int ret, fput_needed;
unsigned flags = 0;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
ret = inotify_find_inode(pathname, &path, flags);
if (ret)
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
group = filp->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
ret = -EINVAL;
if (unlikely(filp->f_op != &inotify_fops))
goto out;
group = filp->private_data;
ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
ret = 0;
fsnotify_destroy_mark(&i_mark->fsn_mark);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
return ret;
}
/*
 * inotify_user_setup - Our initialization function. Note that we cannot return
 * an error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init inotify_user_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
inotify_max_user_watches = 8192;
return 0;
}
module_init(inotify_user_setup);
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_4715_0 |
crossvul-cpp_data_bad_2262_0 | /*
* linux/fs/isofs/inode.c
*
* (C) 1991 Linus Torvalds - minix filesystem
* 1992, 1993, 1994 Eric Youngdale Modified for ISO 9660 filesystem.
* 1994 Eberhard Mönkeberg - multi session handling.
* 1995 Mark Dobie - allow mounting of some weird VideoCDs and PhotoCDs.
* 1997 Gordon Chaffee - Joliet CDs
* 1998 Eric Lammerts - ISO 9660 Level 3
* 2004 Paul Serice - Inode Support pushed out from 4GB to 128GB
* 2004 Paul Serice - NFS Export Operations
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nls.h>
#include <linux/ctype.h>
#include <linux/statfs.h>
#include <linux/cdrom.h>
#include <linux/parser.h>
#include <linux/mpage.h>
#include <linux/user_namespace.h>
#include "isofs.h"
#include "zisofs.h"
#define BEQUIET
static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
static int isofs_hash(const struct dentry *parent, struct qstr *qstr);
static int isofs_dentry_cmpi(const struct dentry *parent,
const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
static int isofs_dentry_cmp(const struct dentry *parent,
const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#ifdef CONFIG_JOLIET
static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_hash_ms(const struct dentry *parent, struct qstr *qstr);
static int isofs_dentry_cmpi_ms(const struct dentry *parent,
const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
static int isofs_dentry_cmp_ms(const struct dentry *parent,
const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name);
#endif
static void isofs_put_super(struct super_block *sb)
{
struct isofs_sb_info *sbi = ISOFS_SB(sb);
#ifdef CONFIG_JOLIET
unload_nls(sbi->s_nls_iocharset);
#endif
kfree(sbi);
sb->s_fs_info = NULL;
return;
}
static int isofs_read_inode(struct inode *);
static int isofs_statfs (struct dentry *, struct kstatfs *);
static struct kmem_cache *isofs_inode_cachep;
static struct inode *isofs_alloc_inode(struct super_block *sb)
{
struct iso_inode_info *ei;
ei = kmem_cache_alloc(isofs_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
return &ei->vfs_inode;
}
static void isofs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(isofs_inode_cachep, ISOFS_I(inode));
}
static void isofs_destroy_inode(struct inode *inode)
{
call_rcu(&inode->i_rcu, isofs_i_callback);
}
static void init_once(void *foo)
{
struct iso_inode_info *ei = foo;
inode_init_once(&ei->vfs_inode);
}
static int __init init_inodecache(void)
{
isofs_inode_cachep = kmem_cache_create("isofs_inode_cache",
sizeof(struct iso_inode_info),
0, (SLAB_RECLAIM_ACCOUNT|
SLAB_MEM_SPREAD),
init_once);
if (isofs_inode_cachep == NULL)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
/*
* Make sure all delayed rcu free inodes are flushed before we
* destroy cache.
*/
rcu_barrier();
kmem_cache_destroy(isofs_inode_cachep);
}
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
if (!(*flags & MS_RDONLY))
return -EROFS;
return 0;
}
static const struct super_operations isofs_sops = {
.alloc_inode = isofs_alloc_inode,
.destroy_inode = isofs_destroy_inode,
.put_super = isofs_put_super,
.statfs = isofs_statfs,
.remount_fs = isofs_remount,
.show_options = generic_show_options,
};
static const struct dentry_operations isofs_dentry_ops[] = {
{
.d_hash = isofs_hash,
.d_compare = isofs_dentry_cmp,
},
{
.d_hash = isofs_hashi,
.d_compare = isofs_dentry_cmpi,
},
#ifdef CONFIG_JOLIET
{
.d_hash = isofs_hash_ms,
.d_compare = isofs_dentry_cmp_ms,
},
{
.d_hash = isofs_hashi_ms,
.d_compare = isofs_dentry_cmpi_ms,
},
#endif
};
struct iso9660_options{
unsigned int rock:1;
unsigned int joliet:1;
unsigned int cruft:1;
unsigned int hide:1;
unsigned int showassoc:1;
unsigned int nocompress:1;
unsigned int overriderockperm:1;
unsigned int uid_set:1;
unsigned int gid_set:1;
unsigned int utf8:1;
unsigned char map;
unsigned char check;
unsigned int blocksize;
umode_t fmode;
umode_t dmode;
kgid_t gid;
kuid_t uid;
char *iocharset;
/* LVE */
s32 session;
s32 sbsector;
};
/*
* Compute the hash for the isofs name corresponding to the dentry.
*/
static int
isofs_hash_common(struct qstr *qstr, int ms)
{
const char *name;
int len;
len = qstr->len;
name = qstr->name;
if (ms) {
while (len && name[len-1] == '.')
len--;
}
qstr->hash = full_name_hash(name, len);
return 0;
}
/*
 * Compute the case-insensitive hash for the isofs name corresponding
 * to the dentry.
 */
static int
isofs_hashi_common(struct qstr *qstr, int ms)
{
const char *name;
int len;
char c;
unsigned long hash;
len = qstr->len;
name = qstr->name;
if (ms) {
while (len && name[len-1] == '.')
len--;
}
hash = init_name_hash();
while (len--) {
c = tolower(*name++);
hash = partial_name_hash(c, hash);
}
qstr->hash = end_name_hash(hash);
return 0;
}
/*
* Compare of two isofs names.
*/
static int isofs_dentry_cmp_common(
unsigned int len, const char *str,
const struct qstr *name, int ms, int ci)
{
int alen, blen;
/* If a filename ends in '.', treat the trailing dots as if they were absent */
alen = name->len;
blen = len;
if (ms) {
while (alen && name->name[alen-1] == '.')
alen--;
while (blen && str[blen-1] == '.')
blen--;
}
if (alen == blen) {
if (ci) {
if (strnicmp(name->name, str, alen) == 0)
return 0;
} else {
if (strncmp(name->name, str, alen) == 0)
return 0;
}
}
return 1;
}
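/*
 * Example (illustrative): with ms set, the hypothetical names "README."
 * and "README" compare equal because trailing dots are stripped from
 * both sides; with ci set, "readme" matches "README" via strnicmp().
 * Following the d_compare convention, 0 means the names match and 1
 * means they differ.
 */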
static int
isofs_hash(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hash_common(qstr, 0);
}
static int
isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hashi_common(qstr, 0);
}
static int
isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 0, 0);
}
static int
isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 0, 1);
}
#ifdef CONFIG_JOLIET
static int
isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hash_common(qstr, 1);
}
static int
isofs_hashi_ms(const struct dentry *dentry, struct qstr *qstr)
{
return isofs_hashi_common(qstr, 1);
}
static int
isofs_dentry_cmp_ms(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 0);
}
static int
isofs_dentry_cmpi_ms(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
return isofs_dentry_cmp_common(len, str, name, 1, 1);
}
#endif
enum {
Opt_block, Opt_check_r, Opt_check_s, Opt_cruft, Opt_gid, Opt_ignore,
Opt_iocharset, Opt_map_a, Opt_map_n, Opt_map_o, Opt_mode, Opt_nojoliet,
Opt_norock, Opt_sb, Opt_session, Opt_uid, Opt_unhide, Opt_utf8, Opt_err,
Opt_nocompress, Opt_hide, Opt_showassoc, Opt_dmode, Opt_overriderockperm,
};
static const match_table_t tokens = {
{Opt_norock, "norock"},
{Opt_nojoliet, "nojoliet"},
{Opt_unhide, "unhide"},
{Opt_hide, "hide"},
{Opt_showassoc, "showassoc"},
{Opt_cruft, "cruft"},
{Opt_utf8, "utf8"},
{Opt_iocharset, "iocharset=%s"},
{Opt_map_a, "map=acorn"},
{Opt_map_a, "map=a"},
{Opt_map_n, "map=normal"},
{Opt_map_n, "map=n"},
{Opt_map_o, "map=off"},
{Opt_map_o, "map=o"},
{Opt_session, "session=%u"},
{Opt_sb, "sbsector=%u"},
{Opt_check_r, "check=relaxed"},
{Opt_check_r, "check=r"},
{Opt_check_s, "check=strict"},
{Opt_check_s, "check=s"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_mode, "mode=%u"},
{Opt_dmode, "dmode=%u"},
{Opt_overriderockperm, "overriderockperm"},
{Opt_block, "block=%u"},
{Opt_ignore, "conv=binary"},
{Opt_ignore, "conv=b"},
{Opt_ignore, "conv=text"},
{Opt_ignore, "conv=t"},
{Opt_ignore, "conv=mtext"},
{Opt_ignore, "conv=m"},
{Opt_ignore, "conv=auto"},
{Opt_ignore, "conv=a"},
{Opt_nocompress, "nocompress"},
{Opt_err, NULL}
};
static int parse_options(char *options, struct iso9660_options *popt)
{
char *p;
int option;
popt->map = 'n';
popt->rock = 1;
popt->joliet = 1;
popt->cruft = 0;
popt->hide = 0;
popt->showassoc = 0;
popt->check = 'u'; /* unset */
popt->nocompress = 0;
popt->blocksize = 1024;
popt->fmode = popt->dmode = ISOFS_INVALID_MODE;
popt->uid_set = 0;
popt->gid_set = 0;
popt->gid = GLOBAL_ROOT_GID;
popt->uid = GLOBAL_ROOT_UID;
popt->iocharset = NULL;
popt->utf8 = 0;
popt->overriderockperm = 0;
popt->session=-1;
popt->sbsector=-1;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
int token;
substring_t args[MAX_OPT_ARGS];
unsigned n;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_norock:
popt->rock = 0;
break;
case Opt_nojoliet:
popt->joliet = 0;
break;
case Opt_hide:
popt->hide = 1;
break;
case Opt_unhide:
case Opt_showassoc:
popt->showassoc = 1;
break;
case Opt_cruft:
popt->cruft = 1;
break;
case Opt_utf8:
popt->utf8 = 1;
break;
#ifdef CONFIG_JOLIET
case Opt_iocharset:
popt->iocharset = match_strdup(&args[0]);
break;
#endif
case Opt_map_a:
popt->map = 'a';
break;
case Opt_map_o:
popt->map = 'o';
break;
case Opt_map_n:
popt->map = 'n';
break;
case Opt_session:
if (match_int(&args[0], &option))
return 0;
n = option;
if (n > 99)
return 0;
popt->session = n + 1;
break;
case Opt_sb:
if (match_int(&args[0], &option))
return 0;
popt->sbsector = option;
break;
case Opt_check_r:
popt->check = 'r';
break;
case Opt_check_s:
popt->check = 's';
break;
case Opt_ignore:
break;
case Opt_uid:
if (match_int(&args[0], &option))
return 0;
popt->uid = make_kuid(current_user_ns(), option);
if (!uid_valid(popt->uid))
return 0;
popt->uid_set = 1;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
popt->gid = make_kgid(current_user_ns(), option);
if (!gid_valid(popt->gid))
return 0;
popt->gid_set = 1;
break;
case Opt_mode:
if (match_int(&args[0], &option))
return 0;
popt->fmode = option;
break;
case Opt_dmode:
if (match_int(&args[0], &option))
return 0;
popt->dmode = option;
break;
case Opt_overriderockperm:
popt->overriderockperm = 1;
break;
case Opt_block:
if (match_int(&args[0], &option))
return 0;
n = option;
if (n != 512 && n != 1024 && n != 2048)
return 0;
popt->blocksize = n;
break;
case Opt_nocompress:
popt->nocompress = 1;
break;
default:
return 0;
}
}
return 1;
}
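/*
 * Example (illustrative): a hypothetical option string
 * "norock,uid=1000,block=2048" leaves opt.rock == 0, opt.blocksize ==
 * 2048, and opt.uid == make_kuid(current_user_ns(), 1000) with
 * opt.uid_set == 1.  parse_options() returns 1 on success and 0 for
 * any unknown, malformed or out-of-range option.
 */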
/*
 * Check whether the driver can tell us the multisession redirection value.
 *
 * Please don't change this if you don't know what you are doing!
 * Multisession is legal only with XA disks.
 * A non-XA disk with more than one volume descriptor may do it right, but
 * is usually written in a nowhere-standardized "multi-partition" manner.
 * Multisession uses absolute addressing (only the first frame of the whole
 * track is #0), multi-partition uses relative addressing (the first frame of
 * each track is #0), and a track is not a session.
 *
 * Broken CD-writing software or drive firmware does not set new standards,
 * at least not when it conflicts with the existing ones.
 *
 * emoenke@gwdg.de
 */
#define WE_OBEY_THE_WRITTEN_STANDARDS 1
static unsigned int isofs_get_last_session(struct super_block *sb, s32 session)
{
struct cdrom_multisession ms_info;
unsigned int vol_desc_start;
struct block_device *bdev = sb->s_bdev;
int i;
vol_desc_start=0;
ms_info.addr_format=CDROM_LBA;
if(session >= 0 && session <= 99) {
struct cdrom_tocentry Te;
Te.cdte_track=session;
Te.cdte_format=CDROM_LBA;
i = ioctl_by_bdev(bdev, CDROMREADTOCENTRY, (unsigned long) &Te);
if (!i) {
printk(KERN_DEBUG "ISOFS: Session %d start %d type %d\n",
session, Te.cdte_addr.lba,
Te.cdte_ctrl&CDROM_DATA_TRACK);
if ((Te.cdte_ctrl&CDROM_DATA_TRACK) == 4)
return Te.cdte_addr.lba;
}
printk(KERN_ERR "ISOFS: Invalid session number or type of track\n");
}
i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long) &ms_info);
if (session > 0)
printk(KERN_ERR "ISOFS: Invalid session number\n");
#if 0
printk(KERN_DEBUG "isofs.inode: CDROMMULTISESSION: rc=%d\n",i);
if (i==0) {
printk(KERN_DEBUG "isofs.inode: XA disk: %s\n",ms_info.xa_flag?"yes":"no");
printk(KERN_DEBUG "isofs.inode: vol_desc_start = %d\n", ms_info.addr.lba);
}
#endif
if (i==0)
#if WE_OBEY_THE_WRITTEN_STANDARDS
if (ms_info.xa_flag) /* necessary for a valid ms_info.addr */
#endif
vol_desc_start=ms_info.addr.lba;
return vol_desc_start;
}
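/*
 * Example (illustrative): for a hypothetical disc mounted with
 * "mount -t iso9660 -o session=2 /dev/cdrom /mnt", parse_options()
 * stores session == 3 (the option value plus one), the function above
 * issues CDROMREADTOCENTRY for that track, and, if the track is a data
 * track, its LBA becomes vol_desc_start for the volume descriptor scan.
 */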
/*
 * Check if the root directory is empty (has fewer than 3 entries; the first
 * two entries are the '.' and '..' records, so fewer than three means there
 * are no real files).
 *
 * Used to detect broken CDs where the ISO root directory is empty but the
 * Joliet root directory is OK. If such a CD has Rock Ridge extensions, they
 * will be disabled (and Joliet used instead) or else no files would be
 * visible.
 */
static bool rootdir_empty(struct super_block *sb, unsigned long block)
{
int offset = 0, files = 0, de_len;
struct iso_directory_record *de;
struct buffer_head *bh;
bh = sb_bread(sb, block);
if (!bh)
return true;
while (files < 3) {
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (de_len == 0)
break;
files++;
offset += de_len;
}
brelse(bh);
return files < 3;
}
/*
* Initialize the superblock and read the root inode.
*
* Note: a check_disk_change() has been done immediately prior
* to this call, so we don't need to check again.
*/
static int isofs_fill_super(struct super_block *s, void *data, int silent)
{
struct buffer_head *bh = NULL, *pri_bh = NULL;
struct hs_primary_descriptor *h_pri = NULL;
struct iso_primary_descriptor *pri = NULL;
struct iso_supplementary_descriptor *sec = NULL;
struct iso_directory_record *rootp;
struct inode *inode;
struct iso9660_options opt;
struct isofs_sb_info *sbi;
unsigned long first_data_zone;
int joliet_level = 0;
int iso_blknum, block;
int orig_zonesize;
int table, error = -EINVAL;
unsigned int vol_desc_start;
save_mount_options(s, data);
sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
s->s_fs_info = sbi;
if (!parse_options((char *)data, &opt))
goto out_freesbi;
/*
 * First of all, get the hardware blocksize for this device.
 * If we don't know what it is, or if the hardware blocksize is
 * larger than the blocksize the user specified, then use the
 * hardware blocksize instead.
 *
 * Open question: what if the device tells us to go beyond the
 * page size?
 */
opt.blocksize = sb_min_blocksize(s, opt.blocksize);
sbi->s_high_sierra = 0; /* default is iso9660 */
vol_desc_start = (opt.sbsector != -1) ?
opt.sbsector : isofs_get_last_session(s,opt.session);
for (iso_blknum = vol_desc_start+16;
iso_blknum < vol_desc_start+100; iso_blknum++) {
struct hs_volume_descriptor *hdp;
struct iso_volume_descriptor *vdp;
block = iso_blknum << (ISOFS_BLOCK_BITS - s->s_blocksize_bits);
if (!(bh = sb_bread(s, block)))
goto out_no_read;
vdp = (struct iso_volume_descriptor *)bh->b_data;
hdp = (struct hs_volume_descriptor *)bh->b_data;
/*
* Due to the overlapping physical location of the descriptors,
* ISO CDs can match hdp->id==HS_STANDARD_ID as well. To ensure
* proper identification in this case, we first check for ISO.
*/
if (strncmp (vdp->id, ISO_STANDARD_ID, sizeof vdp->id) == 0) {
if (isonum_711(vdp->type) == ISO_VD_END)
break;
if (isonum_711(vdp->type) == ISO_VD_PRIMARY) {
if (pri == NULL) {
pri = (struct iso_primary_descriptor *)vdp;
/* Save the buffer in case we need it ... */
pri_bh = bh;
bh = NULL;
}
}
#ifdef CONFIG_JOLIET
else if (isonum_711(vdp->type) == ISO_VD_SUPPLEMENTARY) {
sec = (struct iso_supplementary_descriptor *)vdp;
if (sec->escape[0] == 0x25 && sec->escape[1] == 0x2f) {
if (opt.joliet) {
if (sec->escape[2] == 0x40)
joliet_level = 1;
else if (sec->escape[2] == 0x43)
joliet_level = 2;
else if (sec->escape[2] == 0x45)
joliet_level = 3;
printk(KERN_DEBUG "ISO 9660 Extensions: "
"Microsoft Joliet Level %d\n",
joliet_level);
}
goto root_found;
} else {
/* Unknown supplementary volume descriptor */
sec = NULL;
}
}
#endif
} else {
if (strncmp (hdp->id, HS_STANDARD_ID, sizeof hdp->id) == 0) {
if (isonum_711(hdp->type) != ISO_VD_PRIMARY)
goto out_freebh;
sbi->s_high_sierra = 1;
opt.rock = 0;
h_pri = (struct hs_primary_descriptor *)vdp;
goto root_found;
}
}
/* Just skip any volume descriptors we don't recognize */
brelse(bh);
bh = NULL;
}
/*
* If we fall through, either no volume descriptor was found,
* or else we passed a primary descriptor looking for others.
*/
if (!pri)
goto out_unknown_format;
brelse(bh);
bh = pri_bh;
pri_bh = NULL;
root_found:
if (joliet_level && (pri == NULL || !opt.rock)) {
/* This is the case of Joliet with the norock mount flag.
* A disc with both Joliet and Rock Ridge is handled later
*/
pri = (struct iso_primary_descriptor *) sec;
}
if(sbi->s_high_sierra){
rootp = (struct iso_directory_record *) h_pri->root_directory_record;
sbi->s_nzones = isonum_733(h_pri->volume_space_size);
sbi->s_log_zone_size = isonum_723(h_pri->logical_block_size);
sbi->s_max_size = isonum_733(h_pri->volume_space_size);
} else {
if (!pri)
goto out_freebh;
rootp = (struct iso_directory_record *) pri->root_directory_record;
sbi->s_nzones = isonum_733(pri->volume_space_size);
sbi->s_log_zone_size = isonum_723(pri->logical_block_size);
sbi->s_max_size = isonum_733(pri->volume_space_size);
}
sbi->s_ninodes = 0; /* No way to figure this out easily */
orig_zonesize = sbi->s_log_zone_size;
/*
 * If the zone size is smaller than the hardware sector size,
 * this is a fatal error. This would occur if the disc drive
 * had sectors that were 2048 bytes, but the filesystem had
 * blocks that were 512 bytes (which should only very rarely
 * happen).
 */
if (orig_zonesize < opt.blocksize)
goto out_bad_size;
/* RDE: convert log zone size to bit shift */
switch (sbi->s_log_zone_size) {
case 512: sbi->s_log_zone_size = 9; break;
case 1024: sbi->s_log_zone_size = 10; break;
case 2048: sbi->s_log_zone_size = 11; break;
default:
goto out_bad_zone_size;
}
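/*
 * From here on s_log_zone_size holds a shift count rather than a byte
 * size: for a hypothetical 2048-byte logical zone the field is now 11,
 * so a zone count converts to bytes as (nzones << 11), and
 * isofs_statfs() below derives the block count with the same shift.
 */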
s->s_magic = ISOFS_SUPER_MAGIC;
/*
* With multi-extent files, file size is only limited by the maximum
* size of a file system, which is 8 TB.
*/
s->s_maxbytes = 0x80000000000LL;
/* Set this for reference. It's not currently used except on write,
   which we don't support. */
first_data_zone = isonum_733(rootp->extent) +
isonum_711(rootp->ext_attr_length);
sbi->s_firstdatazone = first_data_zone;
#ifndef BEQUIET
printk(KERN_DEBUG "ISOFS: Max size:%ld Log zone size:%ld\n",
sbi->s_max_size, 1UL << sbi->s_log_zone_size);
printk(KERN_DEBUG "ISOFS: First datazone:%ld\n", sbi->s_firstdatazone);
if(sbi->s_high_sierra)
printk(KERN_DEBUG "ISOFS: Disc in High Sierra format.\n");
#endif
/*
* If the Joliet level is set, we _may_ decide to use the
* secondary descriptor, but can't be sure until after we
* read the root inode. But before reading the root inode
* we may need to change the device blocksize, and would
* rather release the old buffer first. So, we cache the
* first_data_zone value from the secondary descriptor.
*/
if (joliet_level) {
pri = (struct iso_primary_descriptor *) sec;
rootp = (struct iso_directory_record *)
pri->root_directory_record;
first_data_zone = isonum_733(rootp->extent) +
isonum_711(rootp->ext_attr_length);
}
/*
* We're all done using the volume descriptor, and may need
* to change the device blocksize, so release the buffer now.
*/
brelse(pri_bh);
brelse(bh);
/*
* Force the blocksize to 512 for 512 byte sectors. The file
* read primitives really get it wrong in a bad way if we don't
* do this.
*
* Note - we should never be setting the blocksize to something
* less than the hardware sector size for the device. If we
* do, we would end up having to read larger buffers and split
* out portions to satisfy requests.
*
* Note2- the idea here is that we want to deal with the optimal
* zonesize in the filesystem. If we have it set to something less,
* then we have horrible problems with trying to piece together
* bits of adjacent blocks in order to properly read directory
* entries. By forcing the blocksize in this way, we ensure
* that we will never be required to do this.
*/
sb_set_blocksize(s, orig_zonesize);
sbi->s_nls_iocharset = NULL;
#ifdef CONFIG_JOLIET
if (joliet_level && opt.utf8 == 0) {
char *p = opt.iocharset ? opt.iocharset : CONFIG_NLS_DEFAULT;
sbi->s_nls_iocharset = load_nls(p);
if (! sbi->s_nls_iocharset) {
/* Fail only if explicit charset specified */
if (opt.iocharset)
goto out_freesbi;
sbi->s_nls_iocharset = load_nls_default();
}
}
#endif
s->s_op = &isofs_sops;
s->s_export_op = &isofs_export_ops;
sbi->s_mapping = opt.map;
sbi->s_rock = (opt.rock ? 2 : 0);
sbi->s_rock_offset = -1; /* initial offset, will guess until SP is found*/
sbi->s_cruft = opt.cruft;
sbi->s_hide = opt.hide;
sbi->s_showassoc = opt.showassoc;
sbi->s_uid = opt.uid;
sbi->s_gid = opt.gid;
sbi->s_uid_set = opt.uid_set;
sbi->s_gid_set = opt.gid_set;
sbi->s_utf8 = opt.utf8;
sbi->s_nocompress = opt.nocompress;
sbi->s_overriderockperm = opt.overriderockperm;
/*
* It would be incredibly stupid to allow people to mark every file
* on the disk as suid, so we merely allow them to set the default
* permissions.
*/
if (opt.fmode != ISOFS_INVALID_MODE)
sbi->s_fmode = opt.fmode & 0777;
else
sbi->s_fmode = ISOFS_INVALID_MODE;
if (opt.dmode != ISOFS_INVALID_MODE)
sbi->s_dmode = opt.dmode & 0777;
else
sbi->s_dmode = ISOFS_INVALID_MODE;
/*
* Read the root inode, which _may_ result in changing
* the s_rock flag. Once we have the final s_rock value,
* we then decide whether to use the Joliet descriptor.
*/
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
if (IS_ERR(inode))
goto out_no_root;
/*
* Fix for broken CDs with Rock Ridge and empty ISO root directory but
* correct Joliet root directory.
*/
if (sbi->s_rock == 1 && joliet_level &&
rootdir_empty(s, sbi->s_firstdatazone)) {
printk(KERN_NOTICE
"ISOFS: primary root directory is empty. "
"Disabling Rock Ridge and switching to Joliet.");
sbi->s_rock = 0;
}
/*
* If this disk has both Rock Ridge and Joliet on it, then we
* want to use Rock Ridge by default. This can be overridden
* by using the norock mount option. There is still one other
* possibility that is not taken into account: a Rock Ridge
* CD with Unicode names. Until someone sees such a beast, it
* will not be supported.
*/
if (sbi->s_rock == 1) {
joliet_level = 0;
} else if (joliet_level) {
sbi->s_rock = 0;
if (sbi->s_firstdatazone != first_data_zone) {
sbi->s_firstdatazone = first_data_zone;
printk(KERN_DEBUG
"ISOFS: changing to secondary root\n");
iput(inode);
inode = isofs_iget(s, sbi->s_firstdatazone, 0);
if (IS_ERR(inode))
goto out_no_root;
}
}
if (opt.check == 'u') {
/* Only Joliet is case insensitive by default */
if (joliet_level)
opt.check = 'r';
else
opt.check = 's';
}
sbi->s_joliet_level = joliet_level;
/* Make sure the root inode is a directory */
if (!S_ISDIR(inode->i_mode)) {
printk(KERN_WARNING
"isofs_fill_super: root inode is not a directory. "
"Corrupted media?\n");
goto out_iput;
}
table = 0;
if (joliet_level)
table += 2;
if (opt.check == 'r')
table++;
s->s_d_op = &isofs_dentry_ops[table];
/* get the root dentry */
s->s_root = d_make_root(inode);
if (!(s->s_root)) {
error = -ENOMEM;
goto out_no_inode;
}
kfree(opt.iocharset);
return 0;
/*
* Display error messages and free resources.
*/
out_iput:
iput(inode);
goto out_no_inode;
out_no_root:
error = PTR_ERR(inode);
if (error != -ENOMEM)
printk(KERN_WARNING "%s: get root inode failed\n", __func__);
out_no_inode:
#ifdef CONFIG_JOLIET
unload_nls(sbi->s_nls_iocharset);
#endif
goto out_freesbi;
out_no_read:
printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
__func__, s->s_id, iso_blknum, block);
goto out_freebh;
out_bad_zone_size:
printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
sbi->s_log_zone_size);
goto out_freebh;
out_bad_size:
printk(KERN_WARNING "ISOFS: Logical zone size(%d) < hardware blocksize(%u)\n",
orig_zonesize, opt.blocksize);
goto out_freebh;
out_unknown_format:
if (!silent)
printk(KERN_WARNING "ISOFS: Unable to identify CD-ROM format.\n");
out_freebh:
brelse(bh);
brelse(pri_bh);
out_freesbi:
kfree(opt.iocharset);
kfree(sbi);
s->s_fs_info = NULL;
return error;
}
static int isofs_statfs (struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
buf->f_type = ISOFS_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = (ISOFS_SB(sb)->s_nzones
<< (ISOFS_SB(sb)->s_log_zone_size - sb->s_blocksize_bits));
buf->f_bfree = 0;
buf->f_bavail = 0;
buf->f_files = ISOFS_SB(sb)->s_ninodes;
buf->f_ffree = 0;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
buf->f_namelen = NAME_MAX;
return 0;
}
/*
* Get a set of blocks; filling in buffer_heads if already allocated
* or getblk() if they are not. Returns the number of blocks inserted
* (-ve == error.)
*/
int isofs_get_blocks(struct inode *inode, sector_t iblock,
struct buffer_head **bh, unsigned long nblocks)
{
unsigned long b_off = iblock;
unsigned offset, sect_size;
unsigned int firstext;
unsigned long nextblk, nextoff;
int section, rv, error;
struct iso_inode_info *ei = ISOFS_I(inode);
error = -EIO;
rv = 0;
if (iblock != b_off) {
printk(KERN_DEBUG "%s: block number too large\n", __func__);
goto abort;
}
offset = 0;
firstext = ei->i_first_extent;
sect_size = ei->i_section_size >> ISOFS_BUFFER_BITS(inode);
nextblk = ei->i_next_section_block;
nextoff = ei->i_next_section_offset;
section = 0;
while (nblocks) {
/* If we are *way* beyond the end of the file, print a message.
 * Access beyond the end of the file up to the next page boundary
 * is normal, however, because of the way the page cache works.
 * In that case, we just return 0 so that we can properly fill
 * the page with useless information without generating any
 * I/O errors.
 */
if (b_off > ((inode->i_size + PAGE_CACHE_SIZE - 1) >> ISOFS_BUFFER_BITS(inode))) {
printk(KERN_DEBUG "%s: block >= EOF (%lu, %llu)\n",
__func__, b_off,
(unsigned long long)inode->i_size);
goto abort;
}
/* On the last section, nextblk == 0 and the section's byte size is
 * likely to exceed sect_size (which is rounded down to whole blocks)
 * by a partial block, so access beyond the end of the file will
 * reach beyond the section size, too.
 */
while (nextblk && (b_off >= (offset + sect_size))) {
struct inode *ninode;
offset += sect_size;
ninode = isofs_iget(inode->i_sb, nextblk, nextoff);
if (IS_ERR(ninode)) {
error = PTR_ERR(ninode);
goto abort;
}
firstext = ISOFS_I(ninode)->i_first_extent;
sect_size = ISOFS_I(ninode)->i_section_size >> ISOFS_BUFFER_BITS(ninode);
nextblk = ISOFS_I(ninode)->i_next_section_block;
nextoff = ISOFS_I(ninode)->i_next_section_offset;
iput(ninode);
if (++section > 100) {
printk(KERN_DEBUG "%s: More than 100 file sections ?!?"
" aborting...\n", __func__);
printk(KERN_DEBUG "%s: block=%lu firstext=%u sect_size=%u "
"nextblk=%lu nextoff=%lu\n", __func__,
b_off, firstext, (unsigned) sect_size,
nextblk, nextoff);
goto abort;
}
}
if (*bh) {
map_bh(*bh, inode->i_sb, firstext + b_off - offset);
} else {
*bh = sb_getblk(inode->i_sb, firstext+b_off-offset);
if (!*bh)
goto abort;
}
bh++; /* Next buffer head */
b_off++; /* Next buffer offset */
nblocks--;
rv++;
}
error = 0;
abort:
return rv != 0 ? rv : error;
}
/*
* Used by the standard interfaces.
*/
static int isofs_get_block(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create)
{
int ret;
if (create) {
printk(KERN_DEBUG "%s: Kernel tries to allocate a block\n", __func__);
return -EROFS;
}
ret = isofs_get_blocks(inode, iblock, &bh_result, 1);
return ret < 0 ? ret : 0;
}
static int isofs_bmap(struct inode *inode, sector_t block)
{
struct buffer_head dummy;
int error;
dummy.b_state = 0;
dummy.b_blocknr = -1000;
error = isofs_get_block(inode, block, &dummy, 0);
if (!error)
return dummy.b_blocknr;
return 0;
}
struct buffer_head *isofs_bread(struct inode *inode, sector_t block)
{
sector_t blknr = isofs_bmap(inode, block);
if (!blknr)
return NULL;
return sb_bread(inode->i_sb, blknr);
}
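/*
 * Typical use (illustrative sketch): directory code maps a
 * file-relative block to a device block and reads it:
 *
 *	struct buffer_head *bh = isofs_bread(inode, block);
 *	if (!bh)
 *		return 0;
 *	(parse iso_directory_records from bh->b_data)
 *	brelse(bh);
 *
 * A return of 0 from isofs_bmap() doubles as the error signal; that is
 * safe because block 0 lies in the reserved System Area and never
 * holds file data.
 */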
static int isofs_readpage(struct file *file, struct page *page)
{
return mpage_readpage(page, isofs_get_block);
}
static int isofs_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
return mpage_readpages(mapping, pages, nr_pages, isofs_get_block);
}
static sector_t _isofs_bmap(struct address_space *mapping, sector_t block)
{
return generic_block_bmap(mapping,block,isofs_get_block);
}
static const struct address_space_operations isofs_aops = {
.readpage = isofs_readpage,
.readpages = isofs_readpages,
.bmap = _isofs_bmap
};
static int isofs_read_level3_size(struct inode *inode)
{
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
int high_sierra = ISOFS_SB(inode->i_sb)->s_high_sierra;
struct buffer_head *bh = NULL;
unsigned long block, offset, block_saved, offset_saved;
int i = 0;
int more_entries = 0;
struct iso_directory_record *tmpde = NULL;
struct iso_inode_info *ei = ISOFS_I(inode);
inode->i_size = 0;
/* The first 16 blocks are reserved as the System Area. Thus,
* no inodes can appear in block 0. We use this to flag that
* this is the last section. */
ei->i_next_section_block = 0;
ei->i_next_section_offset = 0;
block = ei->i_iget5_block;
offset = ei->i_iget5_offset;
do {
struct iso_directory_record *de;
unsigned int de_len;
if (!bh) {
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
}
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (de_len == 0) {
brelse(bh);
bh = NULL;
++block;
offset = 0;
continue;
}
block_saved = block;
offset_saved = offset;
offset += de_len;
/* Make sure we have a full directory entry */
if (offset >= bufsize) {
int slop = bufsize - offset + de_len;
if (!tmpde) {
tmpde = kmalloc(256, GFP_KERNEL);
if (!tmpde)
goto out_nomem;
}
memcpy(tmpde, de, slop);
offset &= bufsize - 1;
block++;
brelse(bh);
bh = NULL;
if (offset) {
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
memcpy((void *)tmpde+slop, bh->b_data, offset);
}
de = tmpde;
}
inode->i_size += isonum_733(de->size);
if (i == 1) {
ei->i_next_section_block = block_saved;
ei->i_next_section_offset = offset_saved;
}
more_entries = de->flags[-high_sierra] & 0x80;
i++;
if (i > 100)
goto out_toomany;
} while (more_entries);
out:
kfree(tmpde);
if (bh)
brelse(bh);
return 0;
out_nomem:
if (bh)
brelse(bh);
return -ENOMEM;
out_noread:
printk(KERN_INFO "ISOFS: unable to read i-node block %lu\n", block);
kfree(tmpde);
return -EIO;
out_toomany:
printk(KERN_INFO "%s: More than 100 file sections ?!?, aborting...\n"
	"%s: inode=%lu\n",
	__func__, __func__, inode->i_ino);
goto out;
}
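/*
 * Example (illustrative): for a hypothetical file recorded as three
 * sections of 0x100000, 0x100000 and 0x8000 bytes, the loop above sums
 * the directory records into i_size == 0x208000, remembers where the
 * second record lives in i_next_section_block/i_next_section_offset,
 * and stops at the first record without the 0x80 "more extents follow"
 * flag.
 */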
static int isofs_read_inode(struct inode *inode)
{
struct super_block *sb = inode->i_sb;
struct isofs_sb_info *sbi = ISOFS_SB(sb);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
unsigned long block;
int high_sierra = sbi->s_high_sierra;
struct buffer_head *bh = NULL;
struct iso_directory_record *de;
struct iso_directory_record *tmpde = NULL;
unsigned int de_len;
unsigned long offset;
struct iso_inode_info *ei = ISOFS_I(inode);
int ret = -EIO;
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_badread;
offset = ei->i_iget5_offset;
de = (struct iso_directory_record *) (bh->b_data + offset);
de_len = *(unsigned char *) de;
if (offset + de_len > bufsize) {
int frag1 = bufsize - offset;
tmpde = kmalloc(de_len, GFP_KERNEL);
if (tmpde == NULL) {
printk(KERN_INFO "%s: out of memory\n", __func__);
ret = -ENOMEM;
goto fail;
}
memcpy(tmpde, bh->b_data + offset, frag1);
brelse(bh);
bh = sb_bread(inode->i_sb, ++block);
if (!bh)
goto out_badread;
memcpy((char *)tmpde+frag1, bh->b_data, de_len - frag1);
de = tmpde;
}
inode->i_ino = isofs_get_ino(ei->i_iget5_block,
ei->i_iget5_offset,
ISOFS_BUFFER_BITS(inode));
/* Assume it is a normal-format file unless told otherwise */
ei->i_file_format = isofs_file_normal;
if (de->flags[-high_sierra] & 2) {
if (sbi->s_dmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFDIR | sbi->s_dmode;
else
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO;
set_nlink(inode, 1); /*
* Set to 1. We know there are 2, but
* the find utility tries to optimize
* if it is 2, and it screws up. It is
* easier to give 1 which tells find to
* do it the hard way.
*/
} else {
if (sbi->s_fmode != ISOFS_INVALID_MODE) {
inode->i_mode = S_IFREG | sbi->s_fmode;
} else {
/*
* Set default permissions: r-x for all. The disc
* could be shared with DOS machines so virtually
* anything could be a valid executable.
*/
inode->i_mode = S_IFREG | S_IRUGO | S_IXUGO;
}
set_nlink(inode, 1);
}
inode->i_uid = sbi->s_uid;
inode->i_gid = sbi->s_gid;
inode->i_blocks = 0;
ei->i_format_parm[0] = 0;
ei->i_format_parm[1] = 0;
ei->i_format_parm[2] = 0;
ei->i_section_size = isonum_733(de->size);
if (de->flags[-high_sierra] & 0x80) {
ret = isofs_read_level3_size(inode);
if (ret < 0)
goto fail;
ret = -EIO;
} else {
ei->i_next_section_block = 0;
ei->i_next_section_offset = 0;
inode->i_size = isonum_733(de->size);
}
/*
* Some dipshit decided to store some other bit of information
* in the high byte of the file length. Truncate size in case
* this CDROM was mounted with the cruft option.
*/
if (sbi->s_cruft)
inode->i_size &= 0x00ffffff;
if (de->interleave[0]) {
printk(KERN_DEBUG "ISOFS: Interleaved files not (yet) supported.\n");
inode->i_size = 0;
}
/* I have no idea what file_unit_size is used for, so
we will flag it for now */
if (de->file_unit_size[0] != 0) {
printk(KERN_DEBUG "ISOFS: File unit size != 0 for ISO file (%ld).\n",
inode->i_ino);
}
/* I have no idea what other flag bits are used for, so
we will flag it for now */
#ifdef DEBUG
if((de->flags[-high_sierra] & ~2)!= 0){
printk(KERN_DEBUG "ISOFS: Unusual flag settings for ISO file "
"(%ld %x).\n",
inode->i_ino, de->flags[-high_sierra]);
}
#endif
inode->i_mtime.tv_sec =
inode->i_atime.tv_sec =
inode->i_ctime.tv_sec = iso_date(de->date, high_sierra);
inode->i_mtime.tv_nsec =
inode->i_atime.tv_nsec =
inode->i_ctime.tv_nsec = 0;
ei->i_first_extent = (isonum_733(de->extent) +
isonum_711(de->ext_attr_length));
/* Set the number of blocks for stat() - should be done before RR */
inode->i_blocks = (inode->i_size + 511) >> 9;
/*
* Now test for possible Rock Ridge extensions which will override
* some of these numbers in the inode structure.
*/
if (!high_sierra) {
parse_rock_ridge_inode(de, inode);
/* if we want uid/gid set, override the rock ridge setting */
if (sbi->s_uid_set)
inode->i_uid = sbi->s_uid;
if (sbi->s_gid_set)
inode->i_gid = sbi->s_gid;
}
/* Now set final access rights if overriding rock ridge setting */
if (S_ISDIR(inode->i_mode) && sbi->s_overriderockperm &&
sbi->s_dmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFDIR | sbi->s_dmode;
if (S_ISREG(inode->i_mode) && sbi->s_overriderockperm &&
sbi->s_fmode != ISOFS_INVALID_MODE)
inode->i_mode = S_IFREG | sbi->s_fmode;
/* Install the inode operations vector */
if (S_ISREG(inode->i_mode)) {
inode->i_fop = &generic_ro_fops;
switch (ei->i_file_format) {
#ifdef CONFIG_ZISOFS
case isofs_file_compressed:
inode->i_data.a_ops = &zisofs_aops;
break;
#endif
default:
inode->i_data.a_ops = &isofs_aops;
break;
}
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &isofs_dir_inode_operations;
inode->i_fop = &isofs_dir_operations;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &page_symlink_inode_operations;
inode->i_data.a_ops = &isofs_symlink_aops;
} else
/* XXX - parse_rock_ridge_inode() has already set i_rdev. */
init_special_inode(inode, inode->i_mode, inode->i_rdev);
ret = 0;
out:
kfree(tmpde);
if (bh)
brelse(bh);
return ret;
out_badread:
printk(KERN_WARNING "ISOFS: unable to read i-node block\n");
fail:
goto out;
}
struct isofs_iget5_callback_data {
unsigned long block;
unsigned long offset;
};
static int isofs_iget5_test(struct inode *ino, void *data)
{
struct iso_inode_info *i = ISOFS_I(ino);
struct isofs_iget5_callback_data *d =
(struct isofs_iget5_callback_data*)data;
return (i->i_iget5_block == d->block)
&& (i->i_iget5_offset == d->offset);
}
static int isofs_iget5_set(struct inode *ino, void *data)
{
struct iso_inode_info *i = ISOFS_I(ino);
struct isofs_iget5_callback_data *d =
(struct isofs_iget5_callback_data*)data;
i->i_iget5_block = d->block;
i->i_iget5_offset = d->offset;
return 0;
}
/* Store, in the inode's containing structure, the block and block
* offset that point to the underlying meta-data for the inode. The
* code below is otherwise similar to the iget() code in
* include/linux/fs.h */
struct inode *isofs_iget(struct super_block *sb,
unsigned long block,
unsigned long offset)
{
unsigned long hashval;
struct inode *inode;
struct isofs_iget5_callback_data data;
long ret;
if (offset >= 1ul << sb->s_blocksize_bits)
return ERR_PTR(-EINVAL);
data.block = block;
data.offset = offset;
hashval = (block << sb->s_blocksize_bits) | offset;
inode = iget5_locked(sb, hashval, &isofs_iget5_test,
&isofs_iget5_set, &data);
if (!inode)
return ERR_PTR(-ENOMEM);
if (inode->i_state & I_NEW) {
ret = isofs_read_inode(inode);
if (ret < 0) {
iget_failed(inode);
inode = ERR_PTR(ret);
} else {
unlock_new_inode(inode);
}
}
return inode;
}
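/*
 * Hash example (illustrative): with a hypothetical 2048-byte block size
 * (s_blocksize_bits == 11), the record at block 100, offset 40 hashes
 * to (100 << 11) | 40 == 0x32028.  Because the offset is verified above
 * to be smaller than the block size, every (block, offset) pair yields
 * a distinct hashval.
 */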
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
/* We don't support read-write mounts */
if (!(flags & MS_RDONLY))
return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
static struct file_system_type iso9660_fs_type = {
.owner = THIS_MODULE,
.name = "iso9660",
.mount = isofs_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
MODULE_ALIAS_FS("iso9660");
MODULE_ALIAS("iso9660");
static int __init init_iso9660_fs(void)
{
int err = init_inodecache();
if (err)
goto out;
#ifdef CONFIG_ZISOFS
err = zisofs_init();
if (err)
goto out1;
#endif
err = register_filesystem(&iso9660_fs_type);
if (err)
goto out2;
return 0;
out2:
#ifdef CONFIG_ZISOFS
zisofs_cleanup();
out1:
#endif
destroy_inodecache();
out:
return err;
}
static void __exit exit_iso9660_fs(void)
{
unregister_filesystem(&iso9660_fs_type);
#ifdef CONFIG_ZISOFS
zisofs_cleanup();
#endif
destroy_inodecache();
}
module_init(init_iso9660_fs)
module_exit(exit_iso9660_fs)
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2262_0 |
crossvul-cpp_data_bad_3433_1 | /*
* metadata/gc.c: GC icalls.
*
* Author: Paolo Molaro <lupus@ximian.com>
*
* Copyright 2002-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*/
#include <config.h>
#include <glib.h>
#include <string.h>
#include <errno.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mono-gc.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/tabledefs.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/domain-internals.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/metadata-internals.h>
#include <mono/metadata/mono-mlist.h>
#include <mono/metadata/threadpool.h>
#include <mono/metadata/threads-types.h>
#include <mono/utils/mono-logger-internal.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/marshal.h> /* for mono_delegate_free_ftnptr () */
#include <mono/metadata/attach.h>
#include <mono/metadata/console-io.h>
#include <mono/utils/mono-semaphore.h>
#ifndef HOST_WIN32
#include <pthread.h>
#endif
typedef struct DomainFinalizationReq {
MonoDomain *domain;
HANDLE done_event;
} DomainFinalizationReq;
#ifdef PLATFORM_WINCE /* FIXME: add accessors to gc.dll API */
extern void (*__imp_GC_finalizer_notifier)(void);
#define GC_finalizer_notifier __imp_GC_finalizer_notifier
extern int __imp_GC_finalize_on_demand;
#define GC_finalize_on_demand __imp_GC_finalize_on_demand
#endif
static gboolean gc_disabled = FALSE;
static gboolean finalizing_root_domain = FALSE;
#define mono_finalizer_lock() EnterCriticalSection (&finalizer_mutex)
#define mono_finalizer_unlock() LeaveCriticalSection (&finalizer_mutex)
static CRITICAL_SECTION finalizer_mutex;
static GSList *domains_to_finalize= NULL;
static MonoMList *threads_to_finalize = NULL;
static MonoInternalThread *gc_thread;
static void object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*));
static void mono_gchandle_set_target (guint32 gchandle, MonoObject *obj);
#ifndef HAVE_NULL_GC
static HANDLE pending_done_event;
static HANDLE shutdown_event;
#endif
static void
add_thread_to_finalize (MonoInternalThread *thread)
{
mono_finalizer_lock ();
if (!threads_to_finalize)
MONO_GC_REGISTER_ROOT_SINGLE (threads_to_finalize);
threads_to_finalize = mono_mlist_append (threads_to_finalize, (MonoObject*)thread);
mono_finalizer_unlock ();
}
static gboolean suspend_finalizers = FALSE;
/*
* actually, we might want to queue the finalize requests in a separate thread,
* but we need to be careful about the execution domain of the thread...
*/
void
mono_gc_run_finalize (void *obj, void *data)
{
MonoObject *exc = NULL;
MonoObject *o;
#ifndef HAVE_SGEN_GC
MonoObject *o2;
#endif
MonoMethod* finalizer = NULL;
MonoDomain *caller_domain = mono_domain_get ();
MonoDomain *domain;
RuntimeInvokeFunction runtime_invoke;
GSList *l, *refs = NULL;
o = (MonoObject*)((char*)obj + GPOINTER_TO_UINT (data));
if (suspend_finalizers)
return;
domain = o->vtable->domain;
#ifndef HAVE_SGEN_GC
mono_domain_finalizers_lock (domain);
o2 = g_hash_table_lookup (domain->finalizable_objects_hash, o);
refs = mono_gc_remove_weak_track_object (domain, o);
mono_domain_finalizers_unlock (domain);
if (!o2)
/* Already finalized somehow */
return;
#endif
if (refs) {
/*
* Support for GCHandles of type WeakTrackResurrection:
*
* It's not exactly clear how these are supposed to work, or how their
* semantics can be implemented. We only implement one crucial thing:
* these handles are only cleared after the finalizer has run.
*/
for (l = refs; l; l = l->next) {
guint32 gchandle = GPOINTER_TO_UINT (l->data);
mono_gchandle_set_target (gchandle, o);
}
g_slist_free (refs);
}
/* make sure the finalizer is not called again if the object is resurrected */
object_register_finalizer (obj, NULL);
if (o->vtable->klass == mono_defaults.internal_thread_class) {
MonoInternalThread *t = (MonoInternalThread*)o;
if (mono_gc_is_finalizer_internal_thread (t))
/* Avoid finalizing ourselves */
return;
if (t->threadpool_thread && finalizing_root_domain) {
/* Don't finalize threadpool threads when
shutting down - they're finalized when the
threadpool shuts down. */
add_thread_to_finalize (t);
return;
}
}
if (o->vtable->klass->image == mono_defaults.corlib && !strcmp (o->vtable->klass->name, "DynamicMethod") && finalizing_root_domain) {
/*
* These can't be finalized during unloading/shutdown, since that would
* free the native code which can still be referenced by other
* finalizers.
* FIXME: This is not perfect, objects dying at the same time as
* dynamic methods can still reference them even when !shutdown.
*/
return;
}
if (mono_runtime_get_no_exec ())
return;
/* speedup later... and use a timeout */
/* g_print ("Finalize run on %p %s.%s\n", o, mono_object_class (o)->name_space, mono_object_class (o)->name); */
/* Use _internal here, since this thread can enter a doomed appdomain */
mono_domain_set_internal (mono_object_domain (o));
/* delegates that have a native function pointer allocated are
* registered for finalization, but they don't have a Finalize
* method, because in most cases it's not needed and it's just a waste.
*/
if (o->vtable->klass->delegate) {
MonoDelegate* del = (MonoDelegate*)o;
if (del->delegate_trampoline)
mono_delegate_free_ftnptr ((MonoDelegate*)o);
mono_domain_set_internal (caller_domain);
return;
}
finalizer = mono_class_get_finalizer (o->vtable->klass);
#ifndef DISABLE_COM
/* If object has a CCW but has no finalizer, it was only
* registered for finalization in order to free the CCW.
* Else it needs the regular finalizer run.
* FIXME: what to do about resurrection and suppression
* of finalizer on object with CCW.
*/
if (mono_marshal_free_ccw (o) && !finalizer) {
mono_domain_set_internal (caller_domain);
return;
}
#endif
/*
* To avoid the locking plus the other overhead of mono_runtime_invoke (),
* create and precompile a wrapper which calls the finalize method using
* a CALLVIRT.
*/
if (!domain->finalize_runtime_invoke) {
MonoMethod *invoke = mono_marshal_get_runtime_invoke (mono_class_get_method_from_name_flags (mono_defaults.object_class, "Finalize", 0, 0), TRUE);
domain->finalize_runtime_invoke = mono_compile_method (invoke);
}
runtime_invoke = domain->finalize_runtime_invoke;
mono_runtime_class_init (o->vtable);
runtime_invoke (o, NULL, &exc, NULL);
if (exc) {
/* fixme: do something useful */
}
mono_domain_set_internal (caller_domain);
}
void
mono_gc_finalize_threadpool_threads (void)
{
while (threads_to_finalize) {
MonoInternalThread *thread = (MonoInternalThread*) mono_mlist_get_data (threads_to_finalize);
/* Force finalization of the thread. */
thread->threadpool_thread = FALSE;
mono_object_register_finalizer ((MonoObject*)thread);
mono_gc_run_finalize (thread, NULL);
threads_to_finalize = mono_mlist_next (threads_to_finalize);
}
}
gpointer
mono_gc_out_of_memory (size_t size)
{
/*
 * We could allocate some memory at program startup and release it
 * back to the system at this point if we're really low on memory
 * (i.e., if size is smaller than the amount we set apart).
 */
mono_raise_exception (mono_domain_get ()->out_of_memory_ex);
return NULL;
}
/*
* Some of our objects may point to a different address than the address returned by GC_malloc()
* (because of the GetHashCode hack), but we need to pass the real address to register_finalizer.
* This also means that in the callback we need to adjust the pointer to get back the real
* MonoObject*.
* We also need to be consistent in the use of the GC_debug* variants of malloc and register_finalizer,
* since that, too, can cause the underlying pointer to be offset.
*/
static void
object_register_finalizer (MonoObject *obj, void (*callback)(void *, void*))
{
#if HAVE_BOEHM_GC
guint offset = 0;
MonoDomain *domain;
if (obj == NULL)
mono_raise_exception (mono_get_exception_argument_null ("obj"));
domain = obj->vtable->domain;
#ifndef GC_DEBUG
/* This assertion is not valid when GC_DEBUG is defined */
g_assert (GC_base (obj) == (char*)obj - offset);
#endif
if (mono_domain_is_unloading (domain) && (callback != NULL))
/*
* Can't register finalizers in a dying appdomain, since they
* could be invoked after the appdomain has been unloaded.
*/
return;
mono_domain_finalizers_lock (domain);
if (callback)
g_hash_table_insert (domain->finalizable_objects_hash, obj, obj);
else
g_hash_table_remove (domain->finalizable_objects_hash, obj);
mono_domain_finalizers_unlock (domain);
GC_REGISTER_FINALIZER_NO_ORDER ((char*)obj - offset, callback, GUINT_TO_POINTER (offset), NULL, NULL);
#elif defined(HAVE_SGEN_GC)
if (obj == NULL)
mono_raise_exception (mono_get_exception_argument_null ("obj"));
/*
* If we register finalizers for domains that are unloading we might
* end up running them while or after the domain is being cleared, so
* the objects will not be valid anymore.
*/
if (!mono_domain_is_unloading (obj->vtable->domain))
mono_gc_register_for_finalization (obj, callback);
#endif
}
/**
 * mono_object_register_finalizer:
 * @obj: object to register
 *
 * Records that object @obj has a finalizer; its Finalize method will
 * be called when the garbage collector disposes of the object.
 */
void
mono_object_register_finalizer (MonoObject *obj)
{
/* g_print ("Registered finalizer on %p %s.%s\n", obj, mono_object_class (obj)->name_space, mono_object_class (obj)->name); */
object_register_finalizer (obj, mono_gc_run_finalize);
}
/**
* mono_domain_finalize:
* @domain: the domain to finalize
* @timeout: msecs to wait for the finalization to complete, -1 to wait indefinitely
*
* Request finalization of all finalizable objects inside @domain. Wait
* @timeout msecs for the finalization to complete.
*
* Returns: TRUE if succeeded, FALSE if there was a timeout
*/
gboolean
mono_domain_finalize (MonoDomain *domain, guint32 timeout)
{
DomainFinalizationReq *req;
guint32 res;
HANDLE done_event;
if (mono_thread_internal_current () == gc_thread)
/* We are called from inside a finalizer, not much we can do here */
return FALSE;
/*
* No need to create another thread 'cause the finalizer thread
* is still working and will take care of running the finalizers
*/
#ifndef HAVE_NULL_GC
if (gc_disabled)
return TRUE;
mono_gc_collect (mono_gc_max_generation ());
done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
if (done_event == NULL) {
return FALSE;
}
req = g_new0 (DomainFinalizationReq, 1);
req->domain = domain;
req->done_event = done_event;
if (domain == mono_get_root_domain ())
finalizing_root_domain = TRUE;
mono_finalizer_lock ();
domains_to_finalize = g_slist_append (domains_to_finalize, req);
mono_finalizer_unlock ();
/* Tell the finalizer thread to finalize this appdomain */
mono_gc_finalize_notify ();
if (timeout == -1)
timeout = INFINITE;
res = WaitForSingleObjectEx (done_event, timeout, TRUE);
/* printf ("WAIT RES: %d.\n", res); */
if (res == WAIT_TIMEOUT) {
/* We leak the handle here */
return FALSE;
}
CloseHandle (done_event);
if (domain == mono_get_root_domain ()) {
mono_thread_pool_cleanup ();
mono_gc_finalize_threadpool_threads ();
}
return TRUE;
#else
/* We don't support domain finalization without a GC */
return FALSE;
#endif
}
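/*
 * Usage sketch (illustrative): an embedder unloading an appdomain might
 * call
 *
 *	if (!mono_domain_finalize (domain, 2000))
 *		g_warning ("finalizers did not finish within 2 seconds");
 *
 * before proceeding with the unload; a timeout of -1 is mapped to
 * INFINITE above.
 */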
void
ves_icall_System_GC_InternalCollect (int generation)
{
mono_gc_collect (generation);
}
gint64
ves_icall_System_GC_GetTotalMemory (MonoBoolean forceCollection)
{
MONO_ARCH_SAVE_REGS;
if (forceCollection)
mono_gc_collect (mono_gc_max_generation ());
return mono_gc_get_used_size ();
}
void
ves_icall_System_GC_KeepAlive (MonoObject *obj)
{
MONO_ARCH_SAVE_REGS;
/*
* Does nothing.
*/
}
void
ves_icall_System_GC_ReRegisterForFinalize (MonoObject *obj)
{
if (!obj)
mono_raise_exception (mono_get_exception_argument_null ("obj"));
object_register_finalizer (obj, mono_gc_run_finalize);
}
void
ves_icall_System_GC_SuppressFinalize (MonoObject *obj)
{
if (!obj)
mono_raise_exception (mono_get_exception_argument_null ("obj"));
/* delegates have no finalizers, but we register them to deal with the
* unmanaged->managed trampoline. We don't let the user suppress it
* otherwise we'd leak it.
*/
if (obj->vtable->klass->delegate)
return;
/* FIXME: Need to handle case where obj has COM Callable Wrapper
* generated for it that needs to be cleaned up, but user wants to suppress
* their derived object finalizer. */
object_register_finalizer (obj, NULL);
}
void
ves_icall_System_GC_WaitForPendingFinalizers (void)
{
#ifndef HAVE_NULL_GC
if (!mono_gc_pending_finalizers ())
return;
if (mono_thread_internal_current () == gc_thread)
/* Avoid deadlocks */
return;
ResetEvent (pending_done_event);
mono_gc_finalize_notify ();
/* g_print ("Waiting for pending finalizers....\n"); */
WaitForSingleObjectEx (pending_done_event, INFINITE, TRUE);
/* g_print ("Done pending....\n"); */
#endif
}
void
ves_icall_System_GC_register_ephemeron_array (MonoObject *array)
{
#ifdef HAVE_SGEN_GC
if (!mono_gc_ephemeron_array_add (array))
mono_raise_exception (mono_object_domain (array)->out_of_memory_ex);
#endif
}
MonoObject*
ves_icall_System_GC_get_ephemeron_tombstone (void)
{
return mono_domain_get ()->ephemeron_tombstone;
}
#define mono_allocator_lock() EnterCriticalSection (&allocator_section)
#define mono_allocator_unlock() LeaveCriticalSection (&allocator_section)
static CRITICAL_SECTION allocator_section;
static CRITICAL_SECTION handle_section;
typedef enum {
HANDLE_WEAK,
HANDLE_WEAK_TRACK,
HANDLE_NORMAL,
HANDLE_PINNED
} HandleType;
static HandleType mono_gchandle_get_type (guint32 gchandle);
MonoObject *
ves_icall_System_GCHandle_GetTarget (guint32 handle)
{
return mono_gchandle_get_target (handle);
}
/*
* if type == -1, change the target of the handle, otherwise allocate a new handle.
*/
guint32
ves_icall_System_GCHandle_GetTargetHandle (MonoObject *obj, guint32 handle, gint32 type)
{
if (type == -1) {
mono_gchandle_set_target (handle, obj);
/* the handle doesn't change */
return handle;
}
switch (type) {
case HANDLE_WEAK:
return mono_gchandle_new_weakref (obj, FALSE);
case HANDLE_WEAK_TRACK:
return mono_gchandle_new_weakref (obj, TRUE);
case HANDLE_NORMAL:
return mono_gchandle_new (obj, FALSE);
case HANDLE_PINNED:
return mono_gchandle_new (obj, TRUE);
default:
g_assert_not_reached ();
}
return 0;
}
void
ves_icall_System_GCHandle_FreeHandle (guint32 handle)
{
mono_gchandle_free (handle);
}
gpointer
ves_icall_System_GCHandle_GetAddrOfPinnedObject (guint32 handle)
{
MonoObject *obj;
if (mono_gchandle_get_type (handle) != HANDLE_PINNED)
return (gpointer)-2;
obj = mono_gchandle_get_target (handle);
if (obj) {
MonoClass *klass = mono_object_class (obj);
if (klass == mono_defaults.string_class) {
return mono_string_chars ((MonoString*)obj);
} else if (klass->rank) {
return mono_array_addr ((MonoArray*)obj, char, 0);
} else {
/* the C# code will check and throw the exception */
/* FIXME: missing !klass->blittable test, see bug #61134 */
if ((klass->flags & TYPE_ATTRIBUTE_LAYOUT_MASK) == TYPE_ATTRIBUTE_AUTO_LAYOUT)
return (gpointer)-1;
return (char*)obj + sizeof (MonoObject);
}
}
return NULL;
}
typedef struct {
guint32 *bitmap;
gpointer *entries;
guint32 size;
guint8 type;
guint slot_hint : 24; /* starting slot for search */
/* 2^16 appdomains should be enough for everyone (though I know I'll regret this in 20 years) */
/* we alloc this only for weak refs, since we can get the domain directly in the other cases */
guint16 *domain_ids;
} HandleData;
/* weak and weak-track arrays will be allocated in malloc memory. */
static HandleData gc_handles [] = {
{NULL, NULL, 0, HANDLE_WEAK, 0},
{NULL, NULL, 0, HANDLE_WEAK_TRACK, 0},
{NULL, NULL, 0, HANDLE_NORMAL, 0},
{NULL, NULL, 0, HANDLE_PINNED, 0}
};
#define lock_handles(handles) EnterCriticalSection (&handle_section)
#define unlock_handles(handles) LeaveCriticalSection (&handle_section)
static int
find_first_unset (guint32 bitmap)
{
int i;
for (i = 0; i < 32; ++i) {
if (!(bitmap & (1 << i)))
return i;
}
return -1;
}
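/*
 * Example: find_first_unset (0x00000007) returns 3 (bits 0-2 are taken)
 * and find_first_unset (0xffffffff) returns -1, which alloc_handle()
 * below uses to decide whether a bitmap word still has a free slot.
 */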
static guint32
alloc_handle (HandleData *handles, MonoObject *obj, gboolean track)
{
gint slot, i;
guint32 res;
lock_handles (handles);
if (!handles->size) {
handles->size = 32;
if (handles->type > HANDLE_WEAK_TRACK) {
handles->entries = mono_gc_alloc_fixed (sizeof (gpointer) * handles->size, mono_gc_make_root_descr_all_refs (handles->size));
} else {
handles->entries = g_malloc0 (sizeof (gpointer) * handles->size);
handles->domain_ids = g_malloc0 (sizeof (guint16) * handles->size);
}
handles->bitmap = g_malloc0 (handles->size / 8);
}
i = -1;
for (slot = handles->slot_hint; slot < handles->size / 32; ++slot) {
if (handles->bitmap [slot] != 0xffffffff) {
i = find_first_unset (handles->bitmap [slot]);
handles->slot_hint = slot;
break;
}
}
if (i == -1 && handles->slot_hint != 0) {
for (slot = 0; slot < handles->slot_hint; ++slot) {
if (handles->bitmap [slot] != 0xffffffff) {
i = find_first_unset (handles->bitmap [slot]);
handles->slot_hint = slot;
break;
}
}
}
if (i == -1) {
guint32 *new_bitmap;
guint32 new_size = handles->size * 2; /* always double: we memset to 0 based on this below */
/* resize and copy the bitmap */
new_bitmap = g_malloc0 (new_size / 8);
memcpy (new_bitmap, handles->bitmap, handles->size / 8);
g_free (handles->bitmap);
handles->bitmap = new_bitmap;
/* resize and copy the entries */
if (handles->type > HANDLE_WEAK_TRACK) {
gpointer *entries;
entries = mono_gc_alloc_fixed (sizeof (gpointer) * new_size, mono_gc_make_root_descr_all_refs (new_size));
memcpy (entries, handles->entries, sizeof (gpointer) * handles->size);
mono_gc_free_fixed (handles->entries);
handles->entries = entries;
} else {
gpointer *entries;
guint16 *domain_ids;
domain_ids = g_malloc0 (sizeof (guint16) * new_size);
entries = g_malloc (sizeof (gpointer) * new_size);
/* we disable GC because we could lose some disappearing link updates */
mono_gc_disable ();
memcpy (entries, handles->entries, sizeof (gpointer) * handles->size);
memset (entries + handles->size, 0, sizeof (gpointer) * handles->size);
memcpy (domain_ids, handles->domain_ids, sizeof (guint16) * handles->size);
for (i = 0; i < handles->size; ++i) {
MonoObject *obj = mono_gc_weak_link_get (&(handles->entries [i]));
if (handles->entries [i])
mono_gc_weak_link_remove (&(handles->entries [i]));
/*g_print ("reg/unreg entry %d of type %d at %p to object %p (%p), was: %p\n", i, handles->type, &(entries [i]), obj, entries [i], handles->entries [i]);*/
if (obj) {
mono_gc_weak_link_add (&(entries [i]), obj, track);
}
}
g_free (handles->entries);
g_free (handles->domain_ids);
handles->entries = entries;
handles->domain_ids = domain_ids;
mono_gc_enable ();
}
/* set i and slot to the next free position */
i = 0;
slot = (handles->size + 1) / 32;
handles->slot_hint = handles->size + 1;
handles->size = new_size;
}
handles->bitmap [slot] |= 1 << i;
slot = slot * 32 + i;
handles->entries [slot] = obj;
if (handles->type <= HANDLE_WEAK_TRACK) {
/*FIXME, what to use when obj == null?*/
handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id;
if (obj)
mono_gc_weak_link_add (&(handles->entries [slot]), obj, track);
}
mono_perfcounters->gc_num_handles++;
unlock_handles (handles);
/*g_print ("allocated entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/
res = (slot << 3) | (handles->type + 1);
mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_CREATED, handles->type, res, obj);
return res;
}
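/*
 * Handle encoding example (illustrative): a handle packs the slot index
 * and the handle type into one guint32 as ((slot << 3) | (type + 1)).
 * For a hypothetical pinned handle (HANDLE_PINNED == 3) in slot 5:
 *
 *	guint32 h = (5 << 3) | (3 + 1);   becomes 0x2c
 *	guint slot = h >> 3;              yields 5
 *	guint type = (h & 7) - 1;         yields 3 (HANDLE_PINNED)
 *
 * The +1 bias keeps 0 free as an always-invalid handle value.
 */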
/**
 * mono_gchandle_new:
 * @obj: managed object to get a handle for
 * @pinned: whether the object should be pinned
 *
 * This returns a handle that wraps the object. It is used to keep a
 * reference to a managed object from the unmanaged world and to prevent
 * the object from being disposed.
 *
 * If @pinned is false, the address of the object cannot be obtained; if
 * it is true, the address of the object can be obtained. This also pins
 * the object so that a moving garbage collector will not be able to
 * move it.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new (MonoObject *obj, gboolean pinned)
{
return alloc_handle (&gc_handles [pinned? HANDLE_PINNED: HANDLE_NORMAL], obj, FALSE);
}
/**
 * mono_gchandle_new_weakref:
 * @obj: managed object to get a handle for
 * @track_resurrection: whether the handle should remain valid until after
 * the object's finalizer has run
 *
 * This returns a weak handle that wraps the object. It is used to
 * keep a reference to a managed object from the unmanaged world.
 * Unlike with mono_gchandle_new, the object can be reclaimed by the
 * garbage collector; in that case mono_gchandle_get_target will
 * return NULL for the handle.
 *
 * Returns: a handle that can be used to access the object from
 * unmanaged code.
 */
guint32
mono_gchandle_new_weakref (MonoObject *obj, gboolean track_resurrection)
{
guint32 handle = alloc_handle (&gc_handles [track_resurrection? HANDLE_WEAK_TRACK: HANDLE_WEAK], obj, track_resurrection);
#ifndef HAVE_SGEN_GC
if (track_resurrection)
mono_gc_add_weak_track_handle (obj, handle);
#endif
return handle;
}
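/*
 * Usage sketch (illustrative): a weak handle lets unmanaged code probe
 * whether an object is still alive without keeping it alive:
 *
 *	guint32 h = mono_gchandle_new_weakref (obj, FALSE);
 *	...
 *	MonoObject *o = mono_gchandle_get_target (h);
 *	if (!o)
 *		handle_collected ();
 *	mono_gchandle_free (h);
 *
 * handle_collected() is a hypothetical callback, shown only to mark the
 * branch taken once the object has been collected.
 */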
static HandleType
mono_gchandle_get_type (guint32 gchandle)
{
guint type = (gchandle & 7) - 1;
return type;
}
/**
* mono_gchandle_get_target:
* @gchandle: a GCHandle's handle.
*
* The handle was previously created by calling mono_gchandle_new or
* mono_gchandle_new_weakref.
*
* Returns: a pointer to the MonoObject represented by the handle or
* NULL for a collected object if using a weakref handle.
*/
MonoObject*
mono_gchandle_get_target (guint32 gchandle)
{
guint slot = gchandle >> 3;
guint type = (gchandle & 7) - 1;
HandleData *handles = &gc_handles [type];
MonoObject *obj = NULL;
if (type > 3)
return NULL;
lock_handles (handles);
if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
if (handles->type <= HANDLE_WEAK_TRACK) {
obj = mono_gc_weak_link_get (&handles->entries [slot]);
} else {
obj = handles->entries [slot];
}
} else {
/* print a warning? */
}
unlock_handles (handles);
/*g_print ("get target of entry %d of type %d: %p\n", slot, handles->type, obj);*/
return obj;
}
static void
mono_gchandle_set_target (guint32 gchandle, MonoObject *obj)
{
guint slot = gchandle >> 3;
guint type = (gchandle & 7) - 1;
HandleData *handles = &gc_handles [type];
MonoObject *old_obj = NULL;
if (type > 3)
return;
lock_handles (handles);
if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
if (handles->type <= HANDLE_WEAK_TRACK) {
old_obj = handles->entries [slot];
if (handles->entries [slot])
mono_gc_weak_link_remove (&handles->entries [slot]);
if (obj)
mono_gc_weak_link_add (&handles->entries [slot], obj, handles->type == HANDLE_WEAK_TRACK);
/*FIXME, what to use when obj == null?*/
handles->domain_ids [slot] = (obj ? mono_object_get_domain (obj) : mono_domain_get ())->domain_id;
} else {
handles->entries [slot] = obj;
}
} else {
/* print a warning? */
}
/*g_print ("changed entry %d of type %d to object %p (in slot: %p)\n", slot, handles->type, obj, handles->entries [slot]);*/
unlock_handles (handles);
#ifndef HAVE_SGEN_GC
if (type == HANDLE_WEAK_TRACK)
mono_gc_change_weak_track_handle (old_obj, obj, gchandle);
#endif
}
/**
* mono_gchandle_is_in_domain:
* @gchandle: a GCHandle's handle.
* @domain: An application domain.
*
* Returns: true if the object wrapped by the @gchandle belongs to the specific @domain.
*/
gboolean
mono_gchandle_is_in_domain (guint32 gchandle, MonoDomain *domain)
{
guint slot = gchandle >> 3;
guint type = (gchandle & 7) - 1;
HandleData *handles = &gc_handles [type];
gboolean result = FALSE;
if (type > 3)
return FALSE;
lock_handles (handles);
if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
if (handles->type <= HANDLE_WEAK_TRACK) {
result = domain->domain_id == handles->domain_ids [slot];
} else {
MonoObject *obj;
obj = handles->entries [slot];
if (obj == NULL)
result = TRUE;
else
result = domain == mono_object_domain (obj);
}
} else {
/* print a warning? */
}
unlock_handles (handles);
return result;
}
/**
* mono_gchandle_free:
* @gchandle: a GCHandle's handle.
*
* Frees the @gchandle handle. If there are no other outstanding
* references, the garbage collector can reclaim the memory of the
* wrapped object.
*/
void
mono_gchandle_free (guint32 gchandle)
{
guint slot = gchandle >> 3;
guint type = (gchandle & 7) - 1;
HandleData *handles = &gc_handles [type];
if (type > 3)
return;
#ifndef HAVE_SGEN_GC
if (type == HANDLE_WEAK_TRACK)
mono_gc_remove_weak_track_handle (gchandle);
#endif
lock_handles (handles);
if (slot < handles->size && (handles->bitmap [slot / 32] & (1 << (slot % 32)))) {
if (handles->type <= HANDLE_WEAK_TRACK) {
if (handles->entries [slot])
mono_gc_weak_link_remove (&handles->entries [slot]);
} else {
handles->entries [slot] = NULL;
}
handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
} else {
/* print a warning? */
}
mono_perfcounters->gc_num_handles--;
/*g_print ("freed entry %d of type %d\n", slot, handles->type);*/
unlock_handles (handles);
mono_profiler_gc_handle (MONO_PROFILER_GC_HANDLE_DESTROYED, handles->type, gchandle, NULL);
}
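/*
 * Illustrative usage sketch (added; the helper name is hypothetical, but the
 * calls are the public API defined above): how an embedder keeps a managed
 * object reachable across unmanaged code. Compiled out on purpose.
 */
#if 0
static void
example_hold_object (MonoObject *obj)
{
	/* A normal (non-pinned) handle keeps obj alive but not addressable. */
	guint32 handle = mono_gchandle_new (obj, FALSE);
	/* ... unmanaged work; obj cannot be collected here ... */
	MonoObject *target = mono_gchandle_get_target (handle);
	g_assert (target == obj);
	/* Dropping the handle lets the GC reclaim obj again. */
	mono_gchandle_free (handle);
}
#endif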
/**
* mono_gchandle_free_domain:
* @domain: domain that is unloading
*
* Function used internally to clean up any GC handles for objects belonging
* to the specified domain during appdomain unload.
*/
void
mono_gchandle_free_domain (MonoDomain *domain)
{
guint type;
for (type = 0; type < 3; ++type) {
guint slot;
HandleData *handles = &gc_handles [type];
lock_handles (handles);
for (slot = 0; slot < handles->size; ++slot) {
if (!(handles->bitmap [slot / 32] & (1 << (slot % 32))))
continue;
if (type <= HANDLE_WEAK_TRACK) {
if (domain->domain_id == handles->domain_ids [slot]) {
handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
if (handles->entries [slot])
mono_gc_weak_link_remove (&handles->entries [slot]);
}
} else {
if (handles->entries [slot] && mono_object_domain (handles->entries [slot]) == domain) {
handles->bitmap [slot / 32] &= ~(1 << (slot % 32));
handles->entries [slot] = NULL;
}
}
}
unlock_handles (handles);
}
}
MonoBoolean
GCHandle_CheckCurrentDomain (guint32 gchandle)
{
return mono_gchandle_is_in_domain (gchandle, mono_domain_get ());
}
#ifndef HAVE_NULL_GC
#ifdef MONO_HAS_SEMAPHORES
static MonoSemType finalizer_sem;
#endif
static HANDLE finalizer_event;
static volatile gboolean finished=FALSE;
void
mono_gc_finalize_notify (void)
{
#ifdef DEBUG
g_message ( "%s: prodding finalizer", __func__);
#endif
#ifdef MONO_HAS_SEMAPHORES
MONO_SEM_POST (&finalizer_sem);
#else
SetEvent (finalizer_event);
#endif
}
#ifdef HAVE_BOEHM_GC
static void
collect_objects (gpointer key, gpointer value, gpointer user_data)
{
GPtrArray *arr = (GPtrArray*)user_data;
g_ptr_array_add (arr, key);
}
#endif
/*
* finalize_domain_objects:
*
* Run the finalizers of all finalizable objects in req->domain.
*/
static void
finalize_domain_objects (DomainFinalizationReq *req)
{
MonoDomain *domain = req->domain;
#ifdef HAVE_BOEHM_GC
while (g_hash_table_size (domain->finalizable_objects_hash) > 0) {
int i;
GPtrArray *objs;
/*
* Since the domain is unloading, nobody is allowed to put
* new entries into the hash table. But finalize_object might
* remove entries from the hash table, so we make a copy.
*/
objs = g_ptr_array_new ();
g_hash_table_foreach (domain->finalizable_objects_hash, collect_objects, objs);
/* printf ("FINALIZING %d OBJECTS.\n", objs->len); */
for (i = 0; i < objs->len; ++i) {
MonoObject *o = (MonoObject*)g_ptr_array_index (objs, i);
/* FIXME: Avoid finalizing threads, etc */
mono_gc_run_finalize (o, 0);
}
g_ptr_array_free (objs, TRUE);
}
#elif defined(HAVE_SGEN_GC)
#define NUM_FOBJECTS 64
MonoObject *to_finalize [NUM_FOBJECTS];
int count;
while ((count = mono_gc_finalizers_for_domain (domain, to_finalize, NUM_FOBJECTS))) {
int i;
for (i = 0; i < count; ++i) {
mono_gc_run_finalize (to_finalize [i], 0);
}
}
#endif
/* Process finalizers which are already in the queue */
mono_gc_invoke_finalizers ();
/* printf ("DONE.\n"); */
SetEvent (req->done_event);
/* The event is closed in mono_domain_finalize if we get here */
g_free (req);
}
static guint32
finalizer_thread (gpointer unused)
{
while (!finished) {
/* Wait to be notified that there's at least one
* finaliser to run
*/
g_assert (mono_domain_get () == mono_get_root_domain ());
#ifdef MONO_HAS_SEMAPHORES
MONO_SEM_WAIT (&finalizer_sem);
#else
/* Use alertable=FALSE since we will be asked to exit using the event too */
WaitForSingleObjectEx (finalizer_event, INFINITE, FALSE);
#endif
mono_console_handle_async_ops ();
#ifndef DISABLE_ATTACH
mono_attach_maybe_start ();
#endif
if (domains_to_finalize) {
mono_finalizer_lock ();
if (domains_to_finalize) {
DomainFinalizationReq *req = domains_to_finalize->data;
domains_to_finalize = g_slist_remove (domains_to_finalize, req);
mono_finalizer_unlock ();
finalize_domain_objects (req);
} else {
mono_finalizer_unlock ();
}
}
/* If finished == TRUE, mono_gc_cleanup has been called (from
* mono_runtime_cleanup) before the domain is unloaded.
*/
mono_gc_invoke_finalizers ();
SetEvent (pending_done_event);
}
SetEvent (shutdown_event);
return 0;
}
void
mono_gc_init (void)
{
InitializeCriticalSection (&handle_section);
InitializeCriticalSection (&allocator_section);
InitializeCriticalSection (&finalizer_mutex);
MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_NORMAL].entries);
MONO_GC_REGISTER_ROOT_FIXED (gc_handles [HANDLE_PINNED].entries);
mono_gc_base_init ();
if (mono_gc_is_disabled ()) {
gc_disabled = TRUE;
return;
}
finalizer_event = CreateEvent (NULL, FALSE, FALSE, NULL);
pending_done_event = CreateEvent (NULL, TRUE, FALSE, NULL);
shutdown_event = CreateEvent (NULL, TRUE, FALSE, NULL);
if (finalizer_event == NULL || pending_done_event == NULL || shutdown_event == NULL) {
g_assert_not_reached ();
}
#ifdef MONO_HAS_SEMAPHORES
MONO_SEM_INIT (&finalizer_sem, 0);
#endif
gc_thread = mono_thread_create_internal (mono_domain_get (), finalizer_thread, NULL, FALSE);
ves_icall_System_Threading_Thread_SetName_internal (gc_thread, mono_string_new (mono_domain_get (), "Finalizer"));
}
void
mono_gc_cleanup (void)
{
#ifdef DEBUG
g_message ("%s: cleaning up finalizer", __func__);
#endif
if (!gc_disabled) {
ResetEvent (shutdown_event);
finished = TRUE;
if (mono_thread_internal_current () != gc_thread) {
mono_gc_finalize_notify ();
/* Finishing the finalizer thread, so wait a little bit... */
/* MS seems to wait for about 2 seconds */
if (WaitForSingleObjectEx (shutdown_event, 2000, FALSE) == WAIT_TIMEOUT) {
int ret;
/* Set a flag which the finalizer thread can check */
suspend_finalizers = TRUE;
/* Try to abort the thread, in the hope that it is running managed code */
mono_thread_internal_stop (gc_thread);
/* Wait for it to stop */
ret = WaitForSingleObjectEx (gc_thread->handle, 100, TRUE);
if (ret == WAIT_TIMEOUT) {
/*
* The finalizer thread refused to die. There is not much we
* can do here, since the runtime is shutting down so the
* state the finalizer thread depends on will vanish.
*/
g_warning ("Shutting down finalizer thread timed out.");
} else {
/*
* FIXME: On unix, when the above wait returns, the thread
* might still be running io-layer code, or pthreads code.
*/
Sleep (100);
}
}
}
gc_thread = NULL;
#ifdef HAVE_BOEHM_GC
GC_finalizer_notifier = NULL;
#endif
}
DeleteCriticalSection (&handle_section);
DeleteCriticalSection (&allocator_section);
DeleteCriticalSection (&finalizer_mutex);
}
#else
/* Null GC dummy functions */
void
mono_gc_finalize_notify (void)
{
}
void mono_gc_init (void)
{
InitializeCriticalSection (&handle_section);
}
void mono_gc_cleanup (void)
{
}
#endif
gboolean
mono_gc_is_finalizer_internal_thread (MonoInternalThread *thread)
{
return thread == gc_thread;
}
/**
* mono_gc_is_finalizer_thread:
* @thread: the thread to test.
*
* In Mono, objects are finalized asynchronously on a separate thread.
* This routine tests whether the @thread argument represents the
* finalization thread.
*
* Returns true if @thread is the finalization thread.
*/
gboolean
mono_gc_is_finalizer_thread (MonoThread *thread)
{
return mono_gc_is_finalizer_internal_thread (thread->internal_thread);
}
#if defined(__MACH__)
static pthread_t mach_exception_thread;
void
mono_gc_register_mach_exception_thread (pthread_t thread)
{
mach_exception_thread = thread;
}
pthread_t
mono_gc_get_mach_exception_thread (void)
{
return mach_exception_thread;
}
#endif
/**
* mono_gc_parse_environment_string_extract_number:
*
* @str: points to the first digit of the number
* @out: pointer to the variable that will receive the value
*
* Tries to extract a number from the passed string, taking into account the
* k, m and g suffixes
*
* Returns true if parsing was successful
*/
gboolean
mono_gc_parse_environment_string_extract_number (const char *str, glong *out)
{
char *endptr;
int len = strlen (str), shift = 0;
glong val;
gboolean is_suffix = FALSE;
char suffix;
switch (str [len - 1]) {
case 'g':
case 'G':
shift += 10;
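/* fall through */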
case 'm':
case 'M':
shift += 10;
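/* fall through */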
case 'k':
case 'K':
shift += 10;
is_suffix = TRUE;
suffix = str [len - 1];
break;
}
errno = 0;
val = strtol (str, &endptr, 10);
if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN))
|| (errno != 0 && val == 0) || (endptr == str))
return FALSE;
if (is_suffix) {
if (*(endptr + 1)) /* Invalid string. */
return FALSE;
val <<= shift;
}
*out = val;
return TRUE;
}
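/*
 * Illustrative example (added, compiled out): "64m" yields 64 << 20, since
 * the 'm' case above accumulates a shift of 20 by falling through 'k'.
 */
#if 0
static void
example_parse_number (void)
{
	glong bytes;
	g_assert (mono_gc_parse_environment_string_extract_number ("64m", &bytes));
	g_assert (bytes == (64L << 20));	/* 67108864 */
}
#endif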
#ifndef HAVE_SGEN_GC
void*
mono_gc_alloc_mature (MonoVTable *vtable)
{
return mono_object_new_specific (vtable);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3433_1 |
crossvul-cpp_data_good_3486_0 | /*
* Hardware performance events for the Alpha.
*
* We implement HW counts on the EV67 and subsequent CPUs only.
*
* (C) 2010 Michael J. Cree
*
* Somewhat based on the Sparc code, and to a lesser extent the PowerPC and
* ARM code, which are copyright by their respective authors.
*/
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <asm/hwrpb.h>
#include <asm/atomic.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pal.h>
#include <asm/wrperfmon.h>
#include <asm/hw_irq.h>
/* The maximum number of PMCs on any Alpha CPU whatsoever. */
#define MAX_HWEVENTS 3
#define PMC_NO_INDEX -1
/* For tracking PMCs and the hw events they monitor on each CPU. */
struct cpu_hw_events {
int enabled;
/* Number of events scheduled; also the number of entries valid in the arrays below. */
int n_events;
/* Number of events added since last hw_perf_disable(). */
int n_added;
/* Events currently scheduled. */
struct perf_event *event[MAX_HWEVENTS];
/* Event type of each scheduled event. */
unsigned long evtype[MAX_HWEVENTS];
/* Current index of each scheduled event; if not yet determined
* contains PMC_NO_INDEX.
*/
int current_idx[MAX_HWEVENTS];
/* The active PMCs' config for easy use with wrperfmon(). */
unsigned long config;
/* The active counters' indices for easy use with wrperfmon(). */
unsigned long idx_mask;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
/*
* A structure to hold the description of the PMCs available on a particular
* type of Alpha CPU.
*/
struct alpha_pmu_t {
/* Mapping of the perf system hw event types to indigenous event types */
const int *event_map;
/* The number of entries in the event_map */
int max_events;
/* The number of PMCs on this Alpha */
int num_pmcs;
/*
* All PMC counters reside in the IBOX register PCTR. This is the
* LSB of the counter.
*/
int pmc_count_shift[MAX_HWEVENTS];
/*
* The mask that isolates the PMC bits when the LSB of the counter
* is shifted to bit 0.
*/
unsigned long pmc_count_mask[MAX_HWEVENTS];
/* The maximum period the PMC can count. */
unsigned long pmc_max_period[MAX_HWEVENTS];
/*
* The maximum value that may be written to the counter due to
* hardware restrictions is pmc_max_period - pmc_left.
*/
long pmc_left[3];
/* Subroutine for allocation of PMCs. Enforces constraints. */
int (*check_constraints)(struct perf_event **, unsigned long *, int);
};
/*
* The Alpha CPU PMU description currently in operation. This is set during
* the boot process to the specific CPU of the machine.
*/
static const struct alpha_pmu_t *alpha_pmu;
#define HW_OP_UNSUPPORTED -1
/*
* The hardware description of the EV67, EV68, EV69, EV7 and EV79 PMUs
* follow. Since they are identical we refer to them collectively as the
* EV67 henceforth.
*/
/*
* EV67 PMC event types
*
* There is no one-to-one mapping of the possible hw event types to the
* actual codes that are used to program the PMCs hence we introduce our
* own hw event type identifiers.
*/
enum ev67_pmc_event_type {
EV67_CYCLES = 1,
EV67_INSTRUCTIONS,
EV67_BCACHEMISS,
EV67_MBOXREPLAY,
EV67_LAST_ET
};
#define EV67_NUM_EVENT_TYPES (EV67_LAST_ET-EV67_CYCLES)
/* Mapping of the hw event types to the perf tool interface */
static const int ev67_perfmon_event_map[] = {
[PERF_COUNT_HW_CPU_CYCLES] = EV67_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = EV67_INSTRUCTIONS,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = EV67_BCACHEMISS,
};
struct ev67_mapping_t {
int config;
int idx;
};
/*
* The mapping used for one event only - these must be in same order as enum
* ev67_pmc_event_type definition.
*/
static const struct ev67_mapping_t ev67_mapping[] = {
{EV67_PCTR_INSTR_CYCLES, 1}, /* EV67_CYCLES, */
{EV67_PCTR_INSTR_CYCLES, 0}, /* EV67_INSTRUCTIONS */
{EV67_PCTR_INSTR_BCACHEMISS, 1}, /* EV67_BCACHEMISS */
{EV67_PCTR_CYCLES_MBOX, 1} /* EV67_MBOXREPLAY */
};
/*
* Check that a group of events can be simultaneously scheduled on to the
* EV67 PMU. Also allocate counter indices and config.
*/
static int ev67_check_constraints(struct perf_event **event,
unsigned long *evtype, int n_ev)
{
int idx0;
unsigned long config;
idx0 = ev67_mapping[evtype[0]-1].idx;
config = ev67_mapping[evtype[0]-1].config;
if (n_ev == 1)
goto success;
BUG_ON(n_ev != 2);
if (evtype[0] == EV67_MBOXREPLAY || evtype[1] == EV67_MBOXREPLAY) {
/* MBOX replay traps must be on PMC 1 */
idx0 = (evtype[0] == EV67_MBOXREPLAY) ? 1 : 0;
/* Only cycles can accompany MBOX replay traps */
if (evtype[idx0] == EV67_CYCLES) {
config = EV67_PCTR_CYCLES_MBOX;
goto success;
}
}
if (evtype[0] == EV67_BCACHEMISS || evtype[1] == EV67_BCACHEMISS) {
/* Bcache misses must be on PMC 1 */
idx0 = (evtype[0] == EV67_BCACHEMISS) ? 1 : 0;
/* Only instructions can accompany Bcache misses */
if (evtype[idx0] == EV67_INSTRUCTIONS) {
config = EV67_PCTR_INSTR_BCACHEMISS;
goto success;
}
}
if (evtype[0] == EV67_INSTRUCTIONS || evtype[1] == EV67_INSTRUCTIONS) {
/* Instructions must be on PMC 0 */
idx0 = (evtype[0] == EV67_INSTRUCTIONS) ? 0 : 1;
/* By this point only cycles can accompany instructions */
if (evtype[idx0^1] == EV67_CYCLES) {
config = EV67_PCTR_INSTR_CYCLES;
goto success;
}
}
/* Otherwise, darn it, there is a conflict. */
return -1;
success:
event[0]->hw.idx = idx0;
event[0]->hw.config_base = config;
if (n_ev == 2) {
event[1]->hw.idx = idx0 ^ 1;
event[1]->hw.config_base = config;
}
return 0;
}
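/*
 * Summary of the pairings accepted above (added for clarity; derived
 * directly from the checks): a single event of any type always succeeds,
 * and the only valid pairs are CYCLES+MBOXREPLAY, INSTRUCTIONS+BCACHEMISS
 * and INSTRUCTIONS+CYCLES. Anything else conflicts and returns -1.
 */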
static const struct alpha_pmu_t ev67_pmu = {
.event_map = ev67_perfmon_event_map,
.max_events = ARRAY_SIZE(ev67_perfmon_event_map),
.num_pmcs = 2,
.pmc_count_shift = {EV67_PCTR_0_COUNT_SHIFT, EV67_PCTR_1_COUNT_SHIFT, 0},
.pmc_count_mask = {EV67_PCTR_0_COUNT_MASK, EV67_PCTR_1_COUNT_MASK, 0},
.pmc_max_period = {(1UL<<20) - 1, (1UL<<20) - 1, 0},
.pmc_left = {16, 4, 0},
.check_constraints = ev67_check_constraints
};
/*
* Helper routines to ensure that we read/write only the correct PMC bits
* when calling the wrperfmon PALcall.
*/
static inline void alpha_write_pmc(int idx, unsigned long val)
{
val &= alpha_pmu->pmc_count_mask[idx];
val <<= alpha_pmu->pmc_count_shift[idx];
val |= (1<<idx);
wrperfmon(PERFMON_CMD_WRITE, val);
}
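/*
 * Worked example (added for clarity; uses the EV67 description below):
 * writing a value v to PMC 1 builds
 *   val = ((v & EV67_PCTR_1_COUNT_MASK) << EV67_PCTR_1_COUNT_SHIFT) | (1 << 1)
 * i.e. the count lands in PCTR's counter-1 field and bit 1 selects
 * counter 1 for the wrperfmon(PERFMON_CMD_WRITE, val) PALcall.
 */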
static inline unsigned long alpha_read_pmc(int idx)
{
unsigned long val;
val = wrperfmon(PERFMON_CMD_READ, 0);
val >>= alpha_pmu->pmc_count_shift[idx];
val &= alpha_pmu->pmc_count_mask[idx];
return val;
}
/* Set a new period to sample over */
static int alpha_perf_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc, int idx)
{
long left = local64_read(&hwc->period_left);
long period = hwc->sample_period;
int ret = 0;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
/*
* Hardware restrictions require that the counters must not be
* written with values that are too close to the maximum period.
*/
if (unlikely(left < alpha_pmu->pmc_left[idx]))
left = alpha_pmu->pmc_left[idx];
if (left > (long)alpha_pmu->pmc_max_period[idx])
left = alpha_pmu->pmc_max_period[idx];
local64_set(&hwc->prev_count, (unsigned long)(-left));
alpha_write_pmc(idx, (unsigned long)(-left));
perf_event_update_userpage(event);
return ret;
}
/*
* Calculates the count (the 'delta') since the last time the PMC was read.
*
* As the PMCs' full period can easily be exceeded within the perf system
* sampling period we cannot use any high order bits as a guard bit in the
* PMCs to detect overflow as is done by other architectures. The code here
* calculates the delta on the basis that there is no overflow when ovf is
* zero. The value passed via ovf by the interrupt handler corrects for
* overflow.
*
* This can be racy on rare occasions -- a call to this routine can occur
* with an overflowed counter just before the PMI service routine is called.
* The check for a negative delta hopefully always rectifies this situation.
*/
static unsigned long alpha_perf_event_update(struct perf_event *event,
struct hw_perf_event *hwc, int idx, long ovf)
{
long prev_raw_count, new_raw_count;
long delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
new_raw_count = alpha_read_pmc(idx);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count - (prev_raw_count & alpha_pmu->pmc_count_mask[idx])) + ovf;
/* It is possible on very rare occasions that the PMC has overflowed
* but the interrupt is yet to come. Detect and fix this situation.
*/
if (unlikely(delta < 0)) {
delta += alpha_pmu->pmc_max_period[idx] + 1;
}
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return new_raw_count;
}
/*
* Collect all HW events into the array event[].
*/
static int collect_events(struct perf_event *group, int max_count,
struct perf_event *event[], unsigned long *evtype,
int *current_idx)
{
struct perf_event *pe;
int n = 0;
if (!is_software_event(group)) {
if (n >= max_count)
return -1;
event[n] = group;
evtype[n] = group->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
list_for_each_entry(pe, &group->sibling_list, group_entry) {
if (!is_software_event(pe) && pe->state != PERF_EVENT_STATE_OFF) {
if (n >= max_count)
return -1;
event[n] = pe;
evtype[n] = pe->hw.event_base;
current_idx[n++] = PMC_NO_INDEX;
}
}
return n;
}
/*
* Check that a group of events can be simultaneously scheduled on to the PMU.
*/
static int alpha_check_constraints(struct perf_event **events,
unsigned long *evtypes, int n_ev)
{
/* A group with no HW events is possible from hw_perf_group_sched_in(). */
if (n_ev == 0)
return 0;
if (n_ev > alpha_pmu->num_pmcs)
return -1;
return alpha_pmu->check_constraints(events, evtypes, n_ev);
}
/*
* If new events have been scheduled then update cpuc with the new
* configuration. This may involve shifting cycle counts from one PMC to
* another.
*/
static void maybe_change_configuration(struct cpu_hw_events *cpuc)
{
int j;
if (cpuc->n_added == 0)
return;
/* Find counters that are moving to another PMC and update */
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
if (cpuc->current_idx[j] != PMC_NO_INDEX &&
cpuc->current_idx[j] != pe->hw.idx) {
alpha_perf_event_update(pe, &pe->hw, cpuc->current_idx[j], 0);
cpuc->current_idx[j] = PMC_NO_INDEX;
}
}
/* Assign to counters all unassigned events. */
cpuc->idx_mask = 0;
for (j = 0; j < cpuc->n_events; j++) {
struct perf_event *pe = cpuc->event[j];
struct hw_perf_event *hwc = &pe->hw;
int idx = hwc->idx;
if (cpuc->current_idx[j] == PMC_NO_INDEX) {
alpha_perf_event_set_period(pe, hwc, idx);
cpuc->current_idx[j] = idx;
}
if (!(hwc->state & PERF_HES_STOPPED))
cpuc->idx_mask |= (1<<cpuc->current_idx[j]);
}
cpuc->config = cpuc->event[0]->hw.config_base;
}
/* Schedule perf HW event on to PMU.
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
unsigned long irq_flags;
/*
* The Sparc code has the IRQ disable first followed by the perf
* disable, however this can lead to an overflowed counter with the
* PMI disabled on rare occasions. The alpha_perf_event_update()
* routine should detect this situation by noting a negative delta,
* nevertheless we disable the PMCs first to enable a potential
* final PMI to occur before we disable interrupts.
*/
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
/* Default to error to be returned */
ret = -EAGAIN;
/* Insert event on to PMU and if successful modify ret to valid return */
n0 = cpuc->n_events;
if (n0 < alpha_pmu->num_pmcs) {
cpuc->event[n0] = event;
cpuc->evtype[n0] = event->hw.event_base;
cpuc->current_idx[n0] = PMC_NO_INDEX;
if (!alpha_check_constraints(cpuc->event, cpuc->evtype, n0+1)) {
cpuc->n_events++;
cpuc->n_added++;
ret = 0;
}
}
hwc->state = PERF_HES_UPTODATE;
if (!(flags & PERF_EF_START))
hwc->state |= PERF_HES_STOPPED;
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
return ret;
}
/* Remove a perf HW event from the PMU.
* - this function is called from outside this module via the pmu struct
* returned from perf event initialisation.
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
perf_pmu_disable(event->pmu);
local_irq_save(irq_flags);
for (j = 0; j < cpuc->n_events; j++) {
if (event == cpuc->event[j]) {
int idx = cpuc->current_idx[j];
/* Shift remaining entries down into the existing
* slot.
*/
while (++j < cpuc->n_events) {
cpuc->event[j - 1] = cpuc->event[j];
cpuc->evtype[j - 1] = cpuc->evtype[j];
cpuc->current_idx[j - 1] =
cpuc->current_idx[j];
}
/* Absorb the final count and turn off the event. */
alpha_perf_event_update(event, hwc, idx, 0);
perf_event_update_userpage(event);
cpuc->idx_mask &= ~(1UL<<idx);
cpuc->n_events--;
break;
}
}
local_irq_restore(irq_flags);
perf_pmu_enable(event->pmu);
}
static void alpha_pmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
alpha_perf_event_update(event, hwc, hwc->idx, 0);
}
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
hwc->state |= PERF_HES_STOPPED;
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
alpha_perf_event_update(event, hwc, hwc->idx, 0);
hwc->state |= PERF_HES_UPTODATE;
}
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_DISABLE, (1UL<<hwc->idx));
}
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
if (flags & PERF_EF_RELOAD) {
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
alpha_perf_event_set_period(event, hwc, hwc->idx);
}
hwc->state = 0;
cpuc->idx_mask |= 1UL<<hwc->idx;
if (cpuc->enabled)
wrperfmon(PERFMON_CMD_ENABLE, (1UL<<hwc->idx));
}
/*
* Check that CPU performance counters are supported.
* - currently support EV67 and later CPUs.
* - actually some later revisions of the EV6 have the same PMC model as the
* EV67 but we don't do sufficiently deep CPU detection to detect them.
* Bad luck to the very few people who might have one, I guess.
*/
static int supported_cpu(void)
{
struct percpu_struct *cpu;
unsigned long cputype;
/* Get cpu type from HW */
cpu = (struct percpu_struct *)((char *)hwrpb + hwrpb->processor_offset);
cputype = cpu->type & 0xffffffff;
/* Include all of EV67, EV68, EV7, EV79 and EV69 as supported. */
return (cputype >= EV67_CPU) && (cputype <= EV69_CPU);
}
static void hw_perf_event_destroy(struct perf_event *event)
{
/* Nothing to be done! */
return;
}
static int __hw_perf_event_init(struct perf_event *event)
{
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
struct perf_event *evts[MAX_HWEVENTS];
unsigned long evtypes[MAX_HWEVENTS];
int idx_rubbish_bin[MAX_HWEVENTS];
int ev;
int n;
/* We only support a limited range of HARDWARE event types; anything
* else is only programmable via a RAW event type.
*/
if (attr->type == PERF_TYPE_HARDWARE) {
if (attr->config >= alpha_pmu->max_events)
return -EINVAL;
ev = alpha_pmu->event_map[attr->config];
} else if (attr->type == PERF_TYPE_HW_CACHE) {
return -EOPNOTSUPP;
} else if (attr->type == PERF_TYPE_RAW) {
ev = attr->config & 0xff;
} else {
return -EOPNOTSUPP;
}
if (ev < 0) {
return ev;
}
/* The EV67 does not support mode exclusion */
if (attr->exclude_kernel || attr->exclude_user
|| attr->exclude_hv || attr->exclude_idle) {
return -EPERM;
}
/*
* We place the event type in event_base here and leave calculation
* of the codes to programme the PMU for alpha_pmu_enable() because
* it is only then that we will know what HW events are actually
* scheduled on to the PMU. At that point the code to programme the
* PMU is put into config_base and the PMC to use is placed into
* idx. We initialise idx (below) to PMC_NO_INDEX to indicate that
* it is yet to be determined.
*/
hwc->event_base = ev;
/* Collect events in a group together suitable for calling
* alpha_check_constraints() to verify that the group as a whole can
* be scheduled on to the PMU.
*/
n = 0;
if (event->group_leader != event) {
n = collect_events(event->group_leader,
alpha_pmu->num_pmcs - 1,
evts, evtypes, idx_rubbish_bin);
if (n < 0)
return -EINVAL;
}
evtypes[n] = hwc->event_base;
evts[n] = event;
if (alpha_check_constraints(evts, evtypes, n + 1))
return -EINVAL;
/* Indicate that PMU config and idx are yet to be determined. */
hwc->config_base = 0;
hwc->idx = PMC_NO_INDEX;
event->destroy = hw_perf_event_destroy;
/*
* Most architectures reserve the PMU for their use at this point.
* As there is no existing mechanism to arbitrate usage and there
* appears to be no other user of the Alpha PMU we just assume
* that we can just use it, hence a NO-OP here.
*
* Maybe an alpha_reserve_pmu() routine should be implemented but is
* anything else ever going to use it?
*/
if (!hwc->sample_period) {
hwc->sample_period = alpha_pmu->pmc_max_period[0];
hwc->last_period = hwc->sample_period;
local64_set(&hwc->period_left, hwc->sample_period);
}
return 0;
}
/*
* Main entry point to initialise a HW performance event.
*/
static int alpha_pmu_event_init(struct perf_event *event)
{
int err;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!alpha_pmu)
return -ENODEV;
/* Do the real initialisation work. */
err = __hw_perf_event_init(event);
return err;
}
/*
* Main entry point - enable HW performance counters.
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (cpuc->enabled)
return;
cpuc->enabled = 1;
barrier();
if (cpuc->n_events > 0) {
/* Update cpuc with information from any new scheduled events. */
maybe_change_configuration(cpuc);
/* Start counting the desired events. */
wrperfmon(PERFMON_CMD_LOGGING_OPTIONS, EV67_PCTR_MODE_AGGREGATE);
wrperfmon(PERFMON_CMD_DESIRED_EVENTS, cpuc->config);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
}
}
/*
* Main entry point - disable HW performance counters.
*/
static void alpha_pmu_disable(struct pmu *pmu)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
if (!cpuc->enabled)
return;
cpuc->enabled = 0;
cpuc->n_added = 0;
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
}
static struct pmu pmu = {
.pmu_enable = alpha_pmu_enable,
.pmu_disable = alpha_pmu_disable,
.event_init = alpha_pmu_event_init,
.add = alpha_pmu_add,
.del = alpha_pmu_del,
.start = alpha_pmu_start,
.stop = alpha_pmu_stop,
.read = alpha_pmu_read,
};
/*
* Main entry point - dumps the current PMC values for debugging.
*/
void perf_event_print_debug(void)
{
unsigned long flags;
unsigned long pcr;
int pcr0, pcr1;
int cpu;
if (!supported_cpu())
return;
local_irq_save(flags);
cpu = smp_processor_id();
pcr = wrperfmon(PERFMON_CMD_READ, 0);
pcr0 = (pcr >> alpha_pmu->pmc_count_shift[0]) & alpha_pmu->pmc_count_mask[0];
pcr1 = (pcr >> alpha_pmu->pmc_count_shift[1]) & alpha_pmu->pmc_count_mask[1];
pr_info("CPU#%d: PCTR0[%06x] PCTR1[%06x]\n", cpu, pcr0, pcr1);
local_irq_restore(flags);
}
/*
* Performance Monitoring Interrupt Service Routine called when a PMC
* overflows. The PMC that overflowed is passed in la_ptr.
*/
static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct pt_regs *regs)
{
struct cpu_hw_events *cpuc;
struct perf_sample_data data;
struct perf_event *event;
struct hw_perf_event *hwc;
int idx, j;
__get_cpu_var(irq_pmi_count)++;
cpuc = &__get_cpu_var(cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
* disastrous! The EV6 and EV67 counters are sufficiently large to
* prevent this but to be really sure disable the PMCs.
*/
wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
/* la_ptr is the counter that overflowed. */
if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: silly index %ld\n", la_ptr);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
idx = la_ptr;
perf_sample_data_init(&data, 0);
for (j = 0; j < cpuc->n_events; j++) {
if (cpuc->current_idx[j] == idx)
break;
}
if (unlikely(j == cpuc->n_events)) {
/* This can occur if the event is disabled right on a PMC overflow. */
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
event = cpuc->event[j];
if (unlikely(!event)) {
/* This should never occur! */
irq_err_count++;
pr_warning("PMI: No event at index %d!\n", idx);
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
hwc = &event->hw;
alpha_perf_event_update(event, hwc, idx, alpha_pmu->pmc_max_period[idx]+1);
data.period = event->hw.last_period;
if (alpha_perf_event_set_period(event, hwc, idx)) {
if (perf_event_overflow(event, &data, regs)) {
/* Interrupts coming too quickly; "throttle" the
* counter, i.e., disable it for a little while.
*/
alpha_pmu_stop(event, 0);
}
}
wrperfmon(PERFMON_CMD_ENABLE, cpuc->idx_mask);
return;
}
/*
* Init call to initialise performance events at kernel startup.
*/
int __init init_hw_perf_events(void)
{
pr_info("Performance events: ");
if (!supported_cpu()) {
pr_cont("No support for your CPU.\n");
return 0;
}
pr_cont("Supported CPU type!\n");
/* Override performance counter IRQ vector */
perf_irq = alpha_perf_event_irq_handler;
/* And set up PMU specification */
alpha_pmu = &ev67_pmu;
perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
return 0;
}
early_initcall(init_hw_perf_events);
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_0 |
crossvul-cpp_data_bad_5852_0 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "rds.h"
#include "ib.h"
static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
/*
* we have a clumsy combination of RCU and a rwsem protecting this list
* because it is used both in the get_mr fast path and while blocking in
* the FMR flushing path.
*/
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
static void rds_ib_nodev_connect(void)
{
struct rds_ib_connection *ic;
spin_lock(&ib_nodev_conns_lock);
list_for_each_entry(ic, &ib_nodev_conns, ib_node)
rds_conn_connect_if_down(ic->conn);
spin_unlock(&ib_nodev_conns_lock);
}
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
struct rds_ib_connection *ic;
unsigned long flags;
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
rds_conn_drop(ic->conn);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
/*
* rds_ib_destroy_mr_pool() blocks on a few things and mrs drop references
* from interrupt context so we push freeing off into a work struct in krdsd.
*/
static void rds_ib_dev_free(struct work_struct *work)
{
struct rds_ib_ipaddr *i_ipaddr, *i_next;
struct rds_ib_device *rds_ibdev = container_of(work,
struct rds_ib_device, free_work);
if (rds_ibdev->mr_pool)
rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
if (rds_ibdev->mr)
ib_dereg_mr(rds_ibdev->mr);
if (rds_ibdev->pd)
ib_dealloc_pd(rds_ibdev->pd);
list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
list_del(&i_ipaddr->list);
kfree(i_ipaddr);
}
kfree(rds_ibdev);
}
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
if (atomic_dec_and_test(&rds_ibdev->refcount))
queue_work(rds_wq, &rds_ibdev->free_work);
}
static void rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
struct ib_device_attr *dev_attr;
/* Only handle IB (no iWARP) devices */
if (device->node_type != RDMA_NODE_IB_CA)
return;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (!dev_attr)
return;
if (ib_query_device(device, dev_attr)) {
rdsdebug("Query device failed for %s\n", device->name);
goto free_attr;
}
rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
ibdev_to_node(device));
if (!rds_ibdev)
goto free_attr;
spin_lock_init(&rds_ibdev->spinlock);
atomic_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr?: 32;
rds_ibdev->max_fmrs = dev_attr->max_fmr ?
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
fmr_pool_size;
rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device);
if (IS_ERR(rds_ibdev->pd)) {
rds_ibdev->pd = NULL;
goto put_dev;
}
rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(rds_ibdev->mr)) {
rds_ibdev->mr = NULL;
goto put_dev;
}
rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
if (IS_ERR(rds_ibdev->mr_pool)) {
rds_ibdev->mr_pool = NULL;
goto put_dev;
}
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
atomic_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
atomic_inc(&rds_ibdev->refcount);
rds_ib_nodev_connect();
put_dev:
rds_ib_dev_put(rds_ibdev);
free_attr:
kfree(dev_attr);
}
/*
* New connections use this to find the device to associate with the
* connection. It's not in the fast path so we're not concerned about the
* performance of the IB call. (As of this writing, it uses an interrupt
* blocking spinlock to serialize walking a per-device list of all registered
* clients.)
*
* RCU is used to handle incoming connections racing with device teardown.
* Rather than use a lock to serialize removal from the client_data and
* getting a new reference, we use an RCU grace period. The destruction
* path removes the device from client_data and then waits for all RCU
* readers to finish.
*
* A new connection can get NULL from this if its arriving on a
* device that is in the process of being removed.
*/
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
rcu_read_lock();
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (rds_ibdev)
atomic_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
/*
* The IB stack is letting us know that a device is going away. This can
* happen if the underlying HCA driver is removed or if PCI hotplug is removing
* the pci function, for example.
*
* This can be called at any time and can be racing with any other RDS path.
*/
static void rds_ib_remove_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (!rds_ibdev)
return;
rds_ib_dev_shutdown(rds_ibdev);
/* stop connection attempts from getting a reference to this device. */
ib_set_client_data(device, &rds_ib_client, NULL);
down_write(&rds_ib_devices_lock);
list_del_rcu(&rds_ibdev->list);
up_write(&rds_ib_devices_lock);
/*
* This synchronize rcu is waiting for readers of both the ib
* client data and the devices list to finish before we drop
* both of those references.
*/
synchronize_rcu();
rds_ib_dev_put(rds_ibdev);
rds_ib_dev_put(rds_ibdev);
}
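/*
 * Note (added for clarity): the two rds_ib_dev_put() calls above drop the
 * two references taken in rds_ib_add_one() -- one for membership on the
 * rds_ib_devices list and one for the ib client_data pointer -- so the
 * device is freed once any remaining connection references are dropped.
 */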
struct ib_client rds_ib_client = {
.name = "rds_ib",
.add = rds_ib_add_one,
.remove = rds_ib_remove_one
};
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
struct rds_ib_connection *ic;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
return 0;
iinfo->src_addr = conn->c_laddr;
iinfo->dst_addr = conn->c_faddr;
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
rds_ib_get_mr_info(rds_ibdev, iinfo);
}
return 1;
}
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_for_each_conn_info(sock, len, iter, lens,
rds_ib_conn_info_visitor,
sizeof(struct rds_info_rdma_connection));
}
/*
* Early RDS/IB was built to only bind to an address if there is an IPoIB
* device with that address set.
*
* If it were me, I'd advocate for something more flexible. Sending and
* receiving should be device-agnostic. Transports would try and maintain
* connections between peers who have messages queued. Userspace would be
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
static int rds_ib_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
}
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);
/* wait for rds_ib_dev_free() to complete */
flush_workqueue(rds_wq);
}
void rds_ib_exit(void)
{
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
}
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_complete = rds_ib_xmit_complete,
.xmit = rds_ib_xmit,
.xmit_rdma = rds_ib_xmit_rdma,
.xmit_atomic = rds_ib_xmit_atomic,
.recv = rds_ib_recv,
.conn_alloc = rds_ib_conn_alloc,
.conn_free = rds_ib_conn_free,
.conn_connect = rds_ib_conn_connect,
.conn_shutdown = rds_ib_conn_shutdown,
.inc_copy_to_user = rds_ib_inc_copy_to_user,
.inc_free = rds_ib_inc_free,
.cm_initiate_connect = rds_ib_cm_initiate_connect,
.cm_handle_connect = rds_ib_cm_handle_connect,
.cm_connect_complete = rds_ib_cm_connect_complete,
.stats_info_copy = rds_ib_stats_info_copy,
.exit = rds_ib_exit,
.get_mr = rds_ib_get_mr,
.sync_mr = rds_ib_sync_mr,
.free_mr = rds_ib_free_mr,
.flush_mrs = rds_ib_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
.t_type = RDS_TRANS_IB
};
int rds_ib_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_ib_devices);
ret = ib_register_client(&rds_ib_client);
if (ret)
goto out;
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
ret = rds_ib_recv_init();
if (ret)
goto out_sysctl;
ret = rds_trans_register(&rds_ib_transport);
if (ret)
goto out_recv;
rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
goto out;
out_recv:
rds_ib_recv_exit();
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
out:
return ret;
}
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5852_0 |
crossvul-cpp_data_good_3719_0 | /*
* tsacct.c - System accounting over taskstats interface
*
* Copyright (C) Jay Lan, <jlan@sgi.com>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/acct.h>
#include <linux/jiffies.h>
#define USEC_PER_TICK (USEC_PER_SEC/HZ)
/*
* fill in basic accounting fields
*/
void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
{
struct timespec uptime, ts;
s64 ac_etime;
BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN);
/* calculate task elapsed time in timespec */
do_posix_clock_monotonic_gettime(&uptime);
ts = timespec_sub(uptime, tsk->start_time);
/* rebase elapsed time to usec */
ac_etime = timespec_to_ns(&ts);
do_div(ac_etime, NSEC_PER_USEC);
stats->ac_etime = ac_etime;
stats->ac_btime = xtime.tv_sec - ts.tv_sec;
if (thread_group_leader(tsk)) {
stats->ac_exitcode = tsk->exit_code;
if (tsk->flags & PF_FORKNOEXEC)
stats->ac_flag |= AFORK;
}
if (tsk->flags & PF_SUPERPRIV)
stats->ac_flag |= ASU;
if (tsk->flags & PF_DUMPCORE)
stats->ac_flag |= ACORE;
if (tsk->flags & PF_SIGNALED)
stats->ac_flag |= AXSIG;
stats->ac_nice = task_nice(tsk);
stats->ac_sched = tsk->policy;
stats->ac_uid = tsk->uid;
stats->ac_gid = tsk->gid;
stats->ac_pid = tsk->pid;
rcu_read_lock();
stats->ac_ppid = pid_alive(tsk) ?
rcu_dereference(tsk->real_parent)->tgid : 0;
rcu_read_unlock();
stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
stats->ac_minflt = tsk->min_flt;
stats->ac_majflt = tsk->maj_flt;
strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
}
#ifdef CONFIG_TASK_XACCT
#define KB 1024
#define MB (1024*KB)
/*
* fill in extended accounting fields
*/
void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
{
struct mm_struct *mm;
/* convert pages-jiffies to Mbyte-usec */
stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB;
stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB;
mm = get_task_mm(p);
if (mm) {
/* adjust to KB unit */
stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB;
stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
mmput(mm);
}
stats->read_char = p->rchar;
stats->write_char = p->wchar;
stats->read_syscalls = p->syscr;
stats->write_syscalls = p->syscw;
}
#undef KB
#undef MB
/**
* acct_update_integrals - update mm integral fields in task_struct
* @tsk: task_struct for accounting
*/
void acct_update_integrals(struct task_struct *tsk)
{
if (likely(tsk->mm)) {
long delta = cputime_to_jiffies(
cputime_sub(tsk->stime, tsk->acct_stimexpd));
if (delta == 0)
return;
tsk->acct_stimexpd = tsk->stime;
tsk->acct_rss_mem1 += delta * get_mm_rss(tsk->mm);
tsk->acct_vm_mem1 += delta * tsk->mm->total_vm;
}
}
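/*
 * Worked example (added for clarity): a task that accumulated 10 jiffies
 * of unaccounted system time while holding 100 resident pages adds
 * 10 * 100 = 1000 page-jiffies to acct_rss_mem1; xacct_add_tsk() later
 * converts that to Mbyte-usec via jiffies_to_usecs() and PAGE_SIZE.
 */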
/**
* acct_clear_integrals - clear the mm integral fields in task_struct
* @tsk: task_struct whose accounting fields are cleared
*/
void acct_clear_integrals(struct task_struct *tsk)
{
tsk->acct_stimexpd = 0;
tsk->acct_rss_mem1 = 0;
tsk->acct_vm_mem1 = 0;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3719_0 |
crossvul-cpp_data_bad_3486_3 | /*
* ARMv5 [xscale] Performance counter handling code.
*
* Copyright (C) 2010, ARM Ltd., Will Deacon <will.deacon@arm.com>
*
* Based on the previous xscale OProfile code.
*
* There are two variants of the xscale PMU that we support:
* - xscale1pmu: 2 event counters and a cycle counter
* - xscale2pmu: 4 event counters and a cycle counter
* The two variants share event definitions, but have different
* PMU structures.
*/
#ifdef CONFIG_CPU_XSCALE
enum xscale_perf_types {
XSCALE_PERFCTR_ICACHE_MISS = 0x00,
XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
XSCALE_PERFCTR_DATA_STALL = 0x02,
XSCALE_PERFCTR_ITLB_MISS = 0x03,
XSCALE_PERFCTR_DTLB_MISS = 0x04,
XSCALE_PERFCTR_BRANCH = 0x05,
XSCALE_PERFCTR_BRANCH_MISS = 0x06,
XSCALE_PERFCTR_INSTRUCTION = 0x07,
XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
XSCALE_PERFCTR_PC_CHANGED = 0x0D,
XSCALE_PERFCTR_BCU_REQUEST = 0x10,
XSCALE_PERFCTR_BCU_FULL = 0x11,
XSCALE_PERFCTR_BCU_DRAIN = 0x12,
XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
XSCALE_PERFCTR_RMW = 0x16,
/* XSCALE_PERFCTR_CCNT is not hardware defined */
XSCALE_PERFCTR_CCNT = 0xFE,
XSCALE_PERFCTR_UNUSED = 0xFF,
};
enum xscale_counters {
XSCALE_CYCLE_COUNTER = 1,
XSCALE_COUNTER0,
XSCALE_COUNTER1,
XSCALE_COUNTER2,
XSCALE_COUNTER3,
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
},
},
};
#define XSCALE_PMU_ENABLE 0x001
#define XSCALE_PMN_RESET 0x002
#define XSCALE_CCNT_RESET 0x004
#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
#define XSCALE_PMU_CNT64 0x008
#define XSCALE1_OVERFLOWED_MASK 0x700
#define XSCALE1_CCOUNT_OVERFLOW 0x400
#define XSCALE1_COUNT0_OVERFLOW 0x100
#define XSCALE1_COUNT1_OVERFLOW 0x200
#define XSCALE1_CCOUNT_INT_EN 0x040
#define XSCALE1_COUNT0_INT_EN 0x010
#define XSCALE1_COUNT1_INT_EN 0x020
#define XSCALE1_COUNT0_EVT_SHFT 12
#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
#define XSCALE1_COUNT1_EVT_SHFT 20
#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
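/*
 * PMNC bit layout for the xscale1 PMU, reconstructed from the masks above
 * (added for clarity):
 *   bits 27:20  event select for counter 1
 *   bits 19:12  event select for counter 0
 *   bits 10:8   overflow flags (CCNT, counter 1, counter 0)
 *   bits 6:4    interrupt enables (CCNT, counter 1, counter 0)
 *   bit  3      64-cycle increment mode (CNT64)
 *   bit  2      CCNT reset
 *   bit  1      event counter reset
 *   bit  0      PMU enable
 */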
static inline u32
xscale1pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
return val;
}
static inline void
xscale1pmu_write_pmnc(u32 val)
{
/* upper 4 bits and bits 7 and 11 are write-as-0 */
val &= 0xffff77f;
asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
}
static inline int
xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale1pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/*
* NOTE: there's an A stepping erratum that states if an overflow
* bit already exists and another occurs, the previous
* overflow bit gets cleared. There's no workaround.
* Fixed in B stepping or later.
*/
pmnc = xscale1pmu_read_pmnc();
/*
* Write the value back to clear the overflow flags. Overflow
* flags remain in pmnc for use below. We also disable the PMU
* while we process the interrupt.
*/
xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
return IRQ_NONE;
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = 0;
evt = XSCALE1_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
XSCALE1_COUNT0_INT_EN;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_EVT_MASK;
evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
XSCALE1_COUNT1_INT_EN;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long val, mask, evt, flags;
switch (idx) {
case XSCALE_CYCLE_COUNTER:
mask = XSCALE1_CCOUNT_INT_EN;
evt = 0;
break;
case XSCALE_COUNTER0:
mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~mask;
val |= evt;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static int
xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
if (XSCALE_PERFCTR_CCNT == event->config_base) {
if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
return -EAGAIN;
return XSCALE_CYCLE_COUNTER;
} else {
if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask))
return XSCALE_COUNTER1;
if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask))
return XSCALE_COUNTER0;
return -EAGAIN;
}
}
static void
xscale1pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val |= XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale1pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale1pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale1pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale1pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale1pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale1pmu = {
.id = ARM_PERF_PMU_ID_XSCALE1,
.name = "xscale1",
.handle_irq = xscale1pmu_handle_irq,
.enable = xscale1pmu_enable_event,
.disable = xscale1pmu_disable_event,
.read_counter = xscale1pmu_read_counter,
.write_counter = xscale1pmu_write_counter,
.get_event_idx = xscale1pmu_get_event_idx,
.start = xscale1pmu_start,
.stop = xscale1pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 3,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return &xscale1pmu;
}
#define XSCALE2_OVERFLOWED_MASK 0x01f
#define XSCALE2_CCOUNT_OVERFLOW 0x001
#define XSCALE2_COUNT0_OVERFLOW 0x002
#define XSCALE2_COUNT1_OVERFLOW 0x004
#define XSCALE2_COUNT2_OVERFLOW 0x008
#define XSCALE2_COUNT3_OVERFLOW 0x010
#define XSCALE2_CCOUNT_INT_EN 0x001
#define XSCALE2_COUNT0_INT_EN 0x002
#define XSCALE2_COUNT1_INT_EN 0x004
#define XSCALE2_COUNT2_INT_EN 0x008
#define XSCALE2_COUNT3_INT_EN 0x010
#define XSCALE2_COUNT0_EVT_SHFT 0
#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
#define XSCALE2_COUNT1_EVT_SHFT 8
#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
#define XSCALE2_COUNT2_EVT_SHFT 16
#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
#define XSCALE2_COUNT3_EVT_SHFT 24
#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
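/*
 * The four event selectors are packed one byte per counter into a
 * single EVTSEL register. As an illustrative sketch (event number 0x07
 * is an arbitrary placeholder, not taken from this file), programming
 * counter 2 without disturbing the others is a read-modify-write:
 *
 *   evtsel = xscale2pmu_read_event_select();
 *   evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 *   evtsel |= 0x07 << XSCALE2_COUNT2_EVT_SHFT;
 *   xscale2pmu_write_event_select(evtsel);
 *
 * which is the same sequence xscale2pmu_enable_event() performs below.
 */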
static inline u32
xscale2pmu_read_pmnc(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
/* bits 1-2 and 4-23 are read-unpredictable */
return val & 0xff000009;
}
static inline void
xscale2pmu_write_pmnc(u32 val)
{
/* bits 4-23 are write-as-0, 24-31 are write ignored */
val &= 0xf;
asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_overflow_flags(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_overflow_flags(u32 val)
{
asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
}
static inline u32
xscale2pmu_read_event_select(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
return val;
}
static inline void
xscale2pmu_write_event_select(u32 val)
{
asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
}
static inline u32
xscale2pmu_read_int_enable(void)
{
u32 val;
asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
return val;
}
static void
xscale2pmu_write_int_enable(u32 val)
{
asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
}
static inline int
xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
enum xscale_counters counter)
{
int ret = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
break;
case XSCALE_COUNTER0:
ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
break;
case XSCALE_COUNTER1:
ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
break;
case XSCALE_COUNTER2:
ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
break;
case XSCALE_COUNTER3:
ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}
return ret;
}
static irqreturn_t
xscale2pmu_handle_irq(int irq_num, void *dev)
{
unsigned long pmnc, of_flags;
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
struct pt_regs *regs;
int idx;
/* Disable the PMU. */
pmnc = xscale2pmu_read_pmnc();
xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
/* Check the overflow flag register. */
of_flags = xscale2pmu_read_overflow_flags();
if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
return IRQ_NONE;
/* Clear the overflow bits. */
xscale2pmu_write_overflow_flags(of_flags);
regs = get_irq_regs();
perf_sample_data_init(&data, 0);
cpuc = &__get_cpu_var(cpu_hw_events);
for (idx = 0; idx <= armpmu->num_events; ++idx) {
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc;
if (!test_bit(idx, cpuc->active_mask))
continue;
/* the overflow status lives in of_flags on xscale2, not in the PMNC */
if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
continue;
hwc = &event->hw;
armpmu_event_update(event, hwc, idx, 1);
data.period = event->hw.last_period;
if (!armpmu_event_set_period(event, hwc, idx))
continue;
if (perf_event_overflow(event, 0, &data, regs))
armpmu->disable(hwc, idx);
}
irq_work_run();
/*
* Re-enable the PMU.
*/
pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(pmnc);
return IRQ_HANDLED;
}
static void
xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien |= XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien |= XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien |= XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien |= XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien |= XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
unsigned long flags, ien, evtsel;
ien = xscale2pmu_read_int_enable();
evtsel = xscale2pmu_read_event_select();
switch (idx) {
case XSCALE_CYCLE_COUNTER:
ien &= ~XSCALE2_CCOUNT_INT_EN;
break;
case XSCALE_COUNTER0:
ien &= ~XSCALE2_COUNT0_INT_EN;
evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
break;
case XSCALE_COUNTER1:
ien &= ~XSCALE2_COUNT1_INT_EN;
evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
break;
case XSCALE_COUNTER2:
ien &= ~XSCALE2_COUNT2_INT_EN;
evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
break;
case XSCALE_COUNTER3:
ien &= ~XSCALE2_COUNT3_INT_EN;
evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
break;
default:
WARN_ONCE(1, "invalid counter number (%d)\n", idx);
return;
}
raw_spin_lock_irqsave(&pmu_lock, flags);
xscale2pmu_write_event_select(evtsel);
xscale2pmu_write_int_enable(ien);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
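/*
 * Counter allocation order: cycle-count events take the dedicated
 * cycle counter; everything else gets a general-purpose counter. The
 * xscale1 helper claims XSCALE_COUNTER1 then XSCALE_COUNTER0, and only
 * when both are busy does the xscale2 variant below fall back to
 * XSCALE_COUNTER3 and XSCALE_COUNTER2, so the probe order on XScale2
 * is CCNT, C1, C0, C3, C2.
 */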
static int
xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
struct hw_perf_event *event)
{
int idx = xscale1pmu_get_event_idx(cpuc, event);
if (idx >= 0)
goto out;
if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
idx = XSCALE_COUNTER3;
else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
idx = XSCALE_COUNTER2;
out:
return idx;
}
static void
xscale2pmu_start(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
val |= XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static void
xscale2pmu_stop(void)
{
unsigned long flags, val;
raw_spin_lock_irqsave(&pmu_lock, flags);
val = xscale2pmu_read_pmnc();
val &= ~XSCALE_PMU_ENABLE;
xscale2pmu_write_pmnc(val);
raw_spin_unlock_irqrestore(&pmu_lock, flags);
}
static inline u32
xscale2pmu_read_counter(int counter)
{
u32 val = 0;
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
break;
}
return val;
}
static inline void
xscale2pmu_write_counter(int counter, u32 val)
{
switch (counter) {
case XSCALE_CYCLE_COUNTER:
asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
break;
case XSCALE_COUNTER0:
asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER1:
asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER2:
asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
break;
case XSCALE_COUNTER3:
asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
break;
}
}
static const struct arm_pmu xscale2pmu = {
.id = ARM_PERF_PMU_ID_XSCALE2,
.name = "xscale2",
.handle_irq = xscale2pmu_handle_irq,
.enable = xscale2pmu_enable_event,
.disable = xscale2pmu_disable_event,
.read_counter = xscale2pmu_read_counter,
.write_counter = xscale2pmu_write_counter,
.get_event_idx = xscale2pmu_get_event_idx,
.start = xscale2pmu_start,
.stop = xscale2pmu_stop,
.cache_map = &xscale_perf_cache_map,
.event_map = &xscale_perf_map,
.raw_event_mask = 0xFF,
.num_events = 5,
.max_period = (1LLU << 32) - 1,
};
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return &xscale2pmu;
}
#else
static const struct arm_pmu *__init xscale1pmu_init(void)
{
return NULL;
}
static const struct arm_pmu *__init xscale2pmu_init(void)
{
return NULL;
}
#endif /* CONFIG_CPU_XSCALE */
/*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
/*
* xfs_da_btree.c
*
* Routines to implement directories as Btrees of hashed names.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Routines used for growing the Btree.
*/
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
xfs_da_state_blk_t *existing_root,
xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
xfs_da_state_blk_t *existing_blk,
xfs_da_state_blk_t *split_blk,
xfs_da_state_blk_t *blk_to_add,
int treelevel,
int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *node_blk_1,
xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
xfs_da_state_blk_t *old_node_blk,
xfs_da_state_blk_t *new_node_blk);
/*
* Routines used for shrinking the Btree.
*/
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
xfs_da_state_blk_t *src_node_blk,
xfs_da_state_blk_t *dst_node_blk);
/*
* Utility routines.
*/
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
/*
* Allocate a dir-state structure.
* We don't put them on the stack since they're large.
*/
xfs_da_state_t *
xfs_da_state_alloc(void)
{
return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
* Kill the altpath contents of a da-state structure.
*/
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
int i;
for (i = 0; i < state->altpath.active; i++)
state->altpath.blk[i].bp = NULL;
state->altpath.active = 0;
}
/*
* Free a da-state structure.
*/
void
xfs_da_state_free(xfs_da_state_t *state)
{
xfs_da_state_kill_altpath(state);
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
kmem_zone_free(xfs_da_state_zone, state);
}
static bool
xfs_da3_node_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
const struct xfs_dir_ops *ops;
ops = xfs_dir_get_ops(mp, NULL);
ops->node_hdr_from_disk(&ichdr, hdr);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_DA3_NODE_MAGIC)
return false;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
return false;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
return false;
} else {
if (ichdr.magic != XFS_DA_NODE_MAGIC)
return false;
}
if (ichdr.level == 0)
return false;
if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
return false;
if (ichdr.count == 0)
return false;
/*
* we don't know if the node is for an attribute or directory tree,
* so only fail if the count is outside both bounds
*/
if (ichdr.count > mp->m_dir_node_ents &&
ichdr.count > mp->m_attr_node_ents)
return false;
/* XXX: hash order check? */
return true;
}
static void
xfs_da3_node_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (!xfs_da3_node_verify(bp)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (bip)
hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_DA3_NODE_CRC_OFF);
}
/*
* leaf/node format detection on trees is sketchy, so a node read can be done on
* leaf level blocks when detection identifies the tree as a node format tree
* incorrectly. In this case, we need to swap the verifier to match the correct
* format of the block being read.
*/
static void
xfs_da3_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_blkinfo *info = bp->b_addr;
switch (be16_to_cpu(info->magic)) {
case XFS_DA3_NODE_MAGIC:
if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
XFS_DA3_NODE_CRC_OFF))
break;
/* fall through */
case XFS_DA_NODE_MAGIC:
if (!xfs_da3_node_verify(bp))
break;
return;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
bp->b_ops = &xfs_attr3_leaf_buf_ops;
bp->b_ops->verify_read(bp);
return;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
bp->b_ops = &xfs_dir3_leafn_buf_ops;
bp->b_ops->verify_read(bp);
return;
default:
break;
}
/* corrupt block */
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.verify_read = xfs_da3_node_read_verify,
.verify_write = xfs_da3_node_write_verify,
};
int
xfs_da3_node_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int which_fork)
{
int err;
err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
which_fork, &xfs_da3_node_buf_ops);
if (!err && tp) {
struct xfs_da_blkinfo *info = (*bpp)->b_addr;
int type;
switch (be16_to_cpu(info->magic)) {
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
type = XFS_BLFT_DA_NODE_BUF;
break;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
type = XFS_BLFT_ATTR_LEAF_BUF;
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
type = XFS_BLFT_DIR_LEAFN_BUF;
break;
default:
type = 0;
ASSERT(0);
break;
}
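/*
 * Tag the buffer with the type we just detected. The assumption here
 * is that log recovery relies on this tag to apply the matching
 * verifier when the buffer is replayed.
 */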
xfs_trans_buf_set_type(tp, *bpp, type);
}
return err;
}
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
/*
* Create the initial contents of an intermediate node.
*/
int
xfs_da3_node_create(
struct xfs_da_args *args,
xfs_dablk_t blkno,
int level,
struct xfs_buf **bpp,
int whichfork)
{
struct xfs_da_intnode *node;
struct xfs_trans *tp = args->trans;
struct xfs_mount *mp = tp->t_mountp;
struct xfs_da3_icnode_hdr ichdr = {0};
struct xfs_buf *bp;
int error;
struct xfs_inode *dp = args->dp;
trace_xfs_da_node_create(args);
ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
if (error)
return(error);
bp->b_ops = &xfs_da3_node_buf_ops;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
node = bp->b_addr;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
ichdr.magic = XFS_DA3_NODE_MAGIC;
hdr3->info.blkno = cpu_to_be64(bp->b_bn);
hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
} else {
ichdr.magic = XFS_DA_NODE_MAGIC;
}
ichdr.level = level;
dp->d_ops->node_hdr_to_disk(node, &ichdr);
xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
*bpp = bp;
return(0);
}
/*
* Split a leaf node, rebalance, then possibly split
* intermediate nodes, rebalance, etc.
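*
* Rough sketch of the bottom-up walk (descriptive only, no new code
* path): a full leaf at path level N splits in two and the new block
* is remembered in "addblk"; the loop then retries at level N-1, where
* the entry for addblk must be inserted. That insertion can itself
* overflow the node and yield a fresh addblk, and so on upwards until
* a node has room (addblk goes NULL) or the root itself must be split
* by xfs_da3_root_split().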
*/
int /* error */
xfs_da3_split(
struct xfs_da_state *state)
{
struct xfs_da_state_blk *oldblk;
struct xfs_da_state_blk *newblk;
struct xfs_da_state_blk *addblk;
struct xfs_da_intnode *node;
struct xfs_buf *bp;
int max;
int action = 0;
int error;
int i;
trace_xfs_da_split(state->args);
/*
* Walk back up the tree splitting/inserting/adjusting as necessary.
* If we need to insert and there isn't room, split the node, then
* decide which fragment to insert the new block from below into.
* Note that we may split the root this way, but we need more fixup.
*/
max = state->path.active - 1;
ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
addblk = &state->path.blk[max]; /* initial dummy value */
for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
oldblk = &state->path.blk[i];
newblk = &state->altpath.blk[i];
/*
* If a leaf node then
* Allocate a new leaf node, then rebalance across them.
* else if an intermediate node then
* We split on the last layer, must we split the node?
*/
switch (oldblk->magic) {
case XFS_ATTR_LEAF_MAGIC:
error = xfs_attr3_leaf_split(state, oldblk, newblk);
if ((error != 0) && (error != ENOSPC)) {
return(error); /* GROT: attr is inconsistent */
}
if (!error) {
addblk = newblk;
break;
}
/*
* Entry wouldn't fit, split the leaf again.
*/
state->extravalid = 1;
if (state->inleaf) {
state->extraafter = 0; /* before newblk */
trace_xfs_attr_leaf_split_before(state->args);
error = xfs_attr3_leaf_split(state, oldblk,
&state->extrablk);
} else {
state->extraafter = 1; /* after newblk */
trace_xfs_attr_leaf_split_after(state->args);
error = xfs_attr3_leaf_split(state, newblk,
&state->extrablk);
}
if (error)
return(error); /* GROT: attr inconsistent */
addblk = newblk;
break;
case XFS_DIR2_LEAFN_MAGIC:
error = xfs_dir2_leafn_split(state, oldblk, newblk);
if (error)
return error;
addblk = newblk;
break;
case XFS_DA_NODE_MAGIC:
error = xfs_da3_node_split(state, oldblk, newblk, addblk,
max - i, &action);
addblk->bp = NULL;
if (error)
return(error); /* GROT: dir is inconsistent */
/*
* Record the newly split block for the next time thru?
*/
if (action)
addblk = newblk;
else
addblk = NULL;
break;
}
/*
* Update the btree to show the new hashval for this child.
*/
xfs_da3_fixhashpath(state, &state->path);
}
if (!addblk)
return(0);
/*
* Split the root node.
*/
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
if (error) {
addblk->bp = NULL;
return(error); /* GROT: dir is inconsistent */
}
/*
* Update pointers to the node which used to be block 0 and
* just got bumped because of the addition of a new root node.
* There might be three blocks involved if a double split occurred,
* and the original block 0 could be at any position in the list.
*
* Note: the magic numbers and sibling pointers are in the same
* physical place for both v2 and v3 headers (by design). Hence it
* doesn't matter which version of the xfs_da_intnode structure we use
* here as the result will be the same using either structure.
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
bp = addblk->bp;
} else {
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
node = bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
bp = addblk->bp;
} else {
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
node = bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
addblk->bp = NULL;
return(0);
}
/*
* Split the root. We have to create a new root and point to the two
* parts (the split old root) that we just created. Copy block zero to
* the EOF, extending the inode in the process.
*/
STATIC int /* error */
xfs_da3_root_split(
struct xfs_da_state *state,
struct xfs_da_state_blk *blk1,
struct xfs_da_state_blk *blk2)
{
struct xfs_da_intnode *node;
struct xfs_da_intnode *oldroot;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_args *args;
struct xfs_buf *bp;
struct xfs_inode *dp;
struct xfs_trans *tp;
struct xfs_mount *mp;
struct xfs_dir2_leaf *leaf;
xfs_dablk_t blkno;
int level;
int error;
int size;
trace_xfs_da_root_split(state->args);
/*
* Copy the existing (incorrect) block from the root node position
* to a free space somewhere.
*/
args = state->args;
error = xfs_da_grow_inode(args, &blkno);
if (error)
return error;
dp = args->dp;
tp = args->trans;
mp = state->mp;
error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
oldroot = blk1->bp->b_addr;
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
struct xfs_da3_icnode_hdr nodehdr;
dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
btree = dp->d_ops->node_tree_p(oldroot);
size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
level = nodehdr.level;
/*
* we are about to copy oldroot to bp, so set up the type
* of bp while we know exactly what it will be.
*/
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
} else {
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
leaf = (xfs_dir2_leaf_t *)oldroot;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ents = dp->d_ops->leaf_ents_p(leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
level = 0;
/*
* we are about to copy oldroot to bp, so set up the type
* of bp while we know exactly what it will be.
*/
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
}
/*
* we can copy most of the information in the node from one block to
* another, but for CRC enabled headers we have to make sure that the
* block specific identifiers are kept intact. We update the buffer
* directly for this.
*/
memcpy(node, oldroot, size);
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
}
xfs_trans_log_buf(tp, bp, 0, size - 1);
bp->b_ops = blk1->bp->b_ops;
xfs_trans_buf_copy_type(bp, blk1->bp);
blk1->bp = bp;
blk1->blkno = blkno;
/*
* Set up the new root node.
*/
error = xfs_da3_node_create(args,
(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
level + 1, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
btree[1].before = cpu_to_be32(blk2->blkno);
nodehdr.count = 2;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
blk1->blkno < mp->m_dirfreeblk);
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
blk2->blkno < mp->m_dirfreeblk);
}
#endif
/* Header is already logged by xfs_da_node_create */
xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
return 0;
}
/*
* Split the node, rebalance, then add the new entry.
*/
STATIC int /* error */
xfs_da3_node_split(
struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk,
struct xfs_da_state_blk *addblk,
int treelevel,
int *result)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
xfs_dablk_t blkno;
int newcount;
int error;
int useextra;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_split(state->args);
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
/*
* With V2 dirs the extra block is data or freespace.
*/
useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
newcount = 1 + useextra;
/*
* Do we have to split the node?
*/
if (nodehdr.count + newcount > state->node_ents) {
/*
* Allocate a new node, add to the doubly linked chain of
* nodes, then move some of our excess entries into it.
*/
error = xfs_da_grow_inode(state->args, &blkno);
if (error)
return(error); /* GROT: dir is inconsistent */
error = xfs_da3_node_create(state->args, blkno, treelevel,
&newblk->bp, state->args->whichfork);
if (error)
return(error); /* GROT: dir is inconsistent */
newblk->blkno = blkno;
newblk->magic = XFS_DA_NODE_MAGIC;
xfs_da3_node_rebalance(state, oldblk, newblk);
error = xfs_da3_blk_link(state, oldblk, newblk);
if (error)
return(error);
*result = 1;
} else {
*result = 0;
}
/*
* Insert the new entry(s) into the correct block
* (updating last hashval in the process).
*
* xfs_da3_node_add() inserts BEFORE the given index,
* and as a result of using node_lookup_int() we always
* point to a valid entry (not after one), but a split
* operation always results in a new block whose hashvals
* FOLLOW the current block.
*
* If we had double-split op below us, then add the extra block too.
*/
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (oldblk->index <= nodehdr.count) {
oldblk->index++;
xfs_da3_node_add(state, oldblk, addblk);
if (useextra) {
if (state->extraafter)
oldblk->index++;
xfs_da3_node_add(state, oldblk, &state->extrablk);
state->extravalid = 0;
}
} else {
newblk->index++;
xfs_da3_node_add(state, newblk, addblk);
if (useextra) {
if (state->extraafter)
newblk->index++;
xfs_da3_node_add(state, newblk, &state->extrablk);
state->extravalid = 0;
}
}
return(0);
}
/*
* Balance the btree elements between two intermediate nodes,
* usually one full and one empty.
*
* NOTE: if blk2 is empty, then it will get the upper half of blk1.
*/
STATIC void
xfs_da3_node_rebalance(
struct xfs_da_state *state,
struct xfs_da_state_blk *blk1,
struct xfs_da_state_blk *blk2)
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
struct xfs_da_intnode *tmpnode;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da_node_entry *btree_s;
struct xfs_da_node_entry *btree_d;
struct xfs_da3_icnode_hdr nodehdr1;
struct xfs_da3_icnode_hdr nodehdr2;
struct xfs_trans *tp;
int count;
int tmp;
int swap = 0;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_rebalance(state->args);
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
/*
* Figure out how many entries need to move, and in which direction.
* Swap the nodes around if that makes it simpler.
*/
if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
tmpnode = node1;
node1 = node2;
node2 = tmpnode;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
swap = 1;
}
count = (nodehdr1.count - nodehdr2.count) / 2;
if (count == 0)
return;
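/*
 * Worked example with assumed counts: node1 = 10 entries, node2 = 2
 * gives count = 4, so the four highest entries of node1 move down into
 * node2 and both blocks end up with 6; a negative count moves entries
 * in the opposite direction (node2 low entries up into node1).
 */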
tp = state->args->trans;
/*
* Two cases: high-to-low and low-to-high.
*/
if (count > 0) {
/*
* Move elements in node2 up to make a hole.
*/
tmp = nodehdr2.count;
if (tmp > 0) {
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[0];
btree_d = &btree2[count];
memmove(btree_d, btree_s, tmp);
}
/*
* Move the req'd B-tree elements from high in node1 to
* low in node2.
*/
nodehdr2.count += count;
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree1[nodehdr1.count - count];
btree_d = &btree2[0];
memcpy(btree_d, btree_s, tmp);
nodehdr1.count -= count;
} else {
/*
* Move the req'd B-tree elements from low in node2 to
* high in node1.
*/
count = -count;
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[0];
btree_d = &btree1[nodehdr1.count];
memcpy(btree_d, btree_s, tmp);
nodehdr1.count += count;
xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
/*
* Move elements in node2 down to fill the hole.
*/
tmp = nodehdr2.count - count;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[count];
btree_d = &btree2[0];
memmove(btree_d, btree_s, tmp);
nodehdr2.count -= count;
}
/*
* Log header of node 1 and all current bits of node 2.
*/
dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
dp->d_ops->node_hdr_size +
(sizeof(btree2[0]) * nodehdr2.count)));
/*
* Record the last hashval from each block for upward propagation.
* (note: don't use the swapped node pointers)
*/
if (swap) {
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
/*
* Adjust the expected index for insertion.
*/
if (blk1->index >= nodehdr1.count) {
blk2->index = blk1->index - nodehdr1.count;
blk1->index = nodehdr1.count + 1; /* make it invalid */
}
}
/*
* Add a new entry to an intermediate node.
*/
STATIC void
xfs_da3_node_add(
struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_node_entry *btree;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_add(state->args);
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
newblk->blkno < state->mp->m_dirfreeblk);
/*
* We may need to make some room before we insert the new node.
*/
tmp = 0;
if (oldblk->index < nodehdr.count) {
tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
}
btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &btree[oldblk->index],
tmp + sizeof(*btree)));
nodehdr.count += 1;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the oldblk to propagate upwards.
*/
oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*========================================================================
* Routines used for shrinking the Btree.
*========================================================================*/
/*
* Deallocate an empty leaf node, remove it from its parent,
* possibly deallocating that block, etc...
*/
int
xfs_da3_join(
struct xfs_da_state *state)
{
struct xfs_da_state_blk *drop_blk;
struct xfs_da_state_blk *save_blk;
int action = 0;
int error;
trace_xfs_da_join(state->args);
drop_blk = &state->path.blk[ state->path.active-1 ];
save_blk = &state->altpath.blk[ state->path.active-1 ];
ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
/*
* Walk back up the tree joining/deallocating as necessary.
* When we stop dropping blocks, break out.
*/
for ( ; state->path.active >= 2; drop_blk--, save_blk--,
state->path.active--) {
/*
* See if we can combine the block with a neighbor.
* (action == 0) => no options, just leave
* (action == 1) => coalesce, then unlink
* (action == 2) => block empty, unlink it
*/
switch (drop_blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
error = xfs_attr3_leaf_toosmall(state, &action);
if (error)
return(error);
if (action == 0)
return(0);
xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
break;
case XFS_DIR2_LEAFN_MAGIC:
error = xfs_dir2_leafn_toosmall(state, &action);
if (error)
return error;
if (action == 0)
return 0;
xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
break;
case XFS_DA_NODE_MAGIC:
/*
* Remove the offending node, fixup hashvals,
* check for a toosmall neighbor.
*/
xfs_da3_node_remove(state, drop_blk);
xfs_da3_fixhashpath(state, &state->path);
error = xfs_da3_node_toosmall(state, &action);
if (error)
return(error);
if (action == 0)
return 0;
xfs_da3_node_unbalance(state, drop_blk, save_blk);
break;
}
xfs_da3_fixhashpath(state, &state->altpath);
error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
xfs_da_state_kill_altpath(state);
if (error)
return(error);
error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
drop_blk->bp);
drop_blk->bp = NULL;
if (error)
return(error);
}
/*
* We joined all the way to the top. If it turns out that
* we only have one entry in the root, make the child block
* the new root.
*/
xfs_da3_node_remove(state, drop_blk);
xfs_da3_fixhashpath(state, &state->path);
error = xfs_da3_root_join(state, &state->path.blk[0]);
return(error);
}
#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
__be16 magic = blkinfo->magic;
if (level == 1) {
ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
} else {
ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
}
ASSERT(!blkinfo->forw);
ASSERT(!blkinfo->back);
}
#else /* !DEBUG */
#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif /* !DEBUG */
/*
* We have only one entry in the root. Copy the only remaining child of
* the old root to block 0 as the new root node.
*/
STATIC int
xfs_da3_root_join(
struct xfs_da_state *state,
struct xfs_da_state_blk *root_blk)
{
struct xfs_da_intnode *oldroot;
struct xfs_da_args *args;
xfs_dablk_t child;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr oldroothdr;
struct xfs_da_node_entry *btree;
int error;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_root_join(state->args);
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
args = state->args;
oldroot = root_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
ASSERT(oldroothdr.forw == 0);
ASSERT(oldroothdr.back == 0);
/*
* If the root has more than one child, then don't do anything.
*/
if (oldroothdr.count > 1)
return 0;
/*
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
btree = dp->d_ops->node_tree_p(oldroot);
child = be32_to_cpu(btree[0].before);
ASSERT(child != 0);
error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
args->whichfork);
if (error)
return error;
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
/*
* This could be copying a leaf back into the root block in the case of
* there only being a single leaf block left in the tree. Hence we have
* to update the b_ops pointer as well to match the buffer type change
* that could occur. For dir3 blocks we also need to update the block
* number in the buffer header.
*/
memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
root_blk->bp->b_ops = bp->b_ops;
xfs_trans_buf_copy_type(root_blk->bp, bp);
if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
}
xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
}
/*
* Check a node block and its neighbors to see if the block should be
* collapsed into one or the other neighbor. Always keep the block
* with the smaller block number.
* If the current block is over 50% full, don't try to join it, return 0.
* If the block is empty, fill in the state structure and return 2.
* If it can be collapsed, fill in the state structure and return 1.
* If nothing can be done, return 0.
*/
STATIC int
xfs_da3_node_toosmall(
struct xfs_da_state *state,
int *action)
{
struct xfs_da_intnode *node;
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
xfs_dablk_t blkno;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr nodehdr;
int count;
int forward;
int error;
int retval;
int i;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_toosmall(state->args);
/*
* Check for the degenerate case of the block being over 50% full.
* If so, it's not worth even looking to see if we might be able
* to coalesce with a sibling.
*/
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (nodehdr.count > (state->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return(0);
}
/*
* Check for the degenerate case of the block being empty.
* If the block is empty, we'll simply delete it, no need to
* coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL.
*/
if (nodehdr.count == 0) {
/*
* Make altpath point to the block we want to keep and
* path point to the block we want to drop (this one).
*/
forward = (info->forw != 0);
memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
return(error);
if (retval) {
*action = 0;
} else {
*action = 2;
}
return(0);
}
/*
* Examine each sibling block to see if we can coalesce with
* at least 25% free space to spare. We need to figure out
* whether to merge with the forward or the backward block.
* We prefer coalescing with the lower numbered sibling so as
* to shrink a directory over time.
*/
count = state->node_ents;
count -= state->node_ents >> 2;
count -= nodehdr.count;
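/*
 * Worked example with assumed numbers: node_ents == 64 and
 * nodehdr.count == 20 leave count = 64 - 16 - 20 = 28, so a sibling
 * holding at most 28 entries can be merged in while still keeping
 * 16 entries' worth (25% of the block) free.
 */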
/* start with smaller blk num */
forward = nodehdr.forw < nodehdr.back;
for (i = 0; i < 2; forward = !forward, i++) {
struct xfs_da3_icnode_hdr thdr;
if (forward)
blkno = nodehdr.forw;
else
blkno = nodehdr.back;
if (blkno == 0)
continue;
error = xfs_da3_node_read(state->args->trans, dp,
blkno, -1, &bp, state->args->whichfork);
if (error)
return(error);
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&thdr, node);
xfs_trans_brelse(state->args->trans, bp);
if (count - thdr.count >= 0)
break; /* fits with at least 25% to spare */
}
if (i >= 2) {
*action = 0;
return 0;
}
/*
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
} else {
error = xfs_da3_path_shift(state, &state->path, forward,
0, &retval);
}
if (error)
return error;
if (retval) {
*action = 0;
return 0;
}
*action = 1;
return 0;
}
/*
* Pick up the last hashvalue from an intermediate node.
*/
STATIC uint
xfs_da3_node_lasthash(
struct xfs_inode *dp,
struct xfs_buf *bp,
int *count)
{
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
btree = dp->d_ops->node_tree_p(node);
return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*
* Walk back up the tree adjusting hash values as necessary,
* when we stop making changes, return.
*/
void
xfs_da3_fixhashpath(
struct xfs_da_state *state,
struct xfs_da_state_path *path)
{
struct xfs_da_state_blk *blk;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
xfs_dahash_t lasthash = 0;
int level;
int count;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_fixhashpath(state->args);
level = path->active-1;
blk = &path->blk[ level ];
switch (blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DIR2_LEAFN_MAGIC:
lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DA_NODE_MAGIC:
lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
}
for (blk--, level--; level >= 0; blk--, level--) {
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
btree[blk->index].hashval = cpu_to_be32(lasthash);
xfs_trans_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, &btree[blk->index],
sizeof(*btree)));
lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
}
/*
* Remove an entry from an intermediate node.
*/
STATIC void
xfs_da3_node_remove(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_node_entry *btree;
int index;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_remove(state->args);
node = drop_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
ASSERT(drop_blk->index < nodehdr.count);
ASSERT(drop_blk->index >= 0);
/*
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
btree = dp->d_ops->node_tree_p(node);
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
memmove(&btree[index], &btree[index + 1], tmp);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], tmp));
index = nodehdr.count - 1;
}
memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
nodehdr.count -= 1;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the block to propagate upwards.
*/
drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}
/*
* Unbalance the elements between two intermediate nodes,
* move all Btree elements from one node into another.
*/
STATIC void
xfs_da3_node_unbalance(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk)
{
struct xfs_da_intnode *drop_node;
struct xfs_da_intnode *save_node;
struct xfs_da_node_entry *drop_btree;
struct xfs_da_node_entry *save_btree;
struct xfs_da3_icnode_hdr drop_hdr;
struct xfs_da3_icnode_hdr save_hdr;
struct xfs_trans *tp;
int sindex;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_unbalance(state->args);
drop_node = drop_blk->bp->b_addr;
save_node = save_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
drop_btree = dp->d_ops->node_tree_p(drop_node);
save_btree = dp->d_ops->node_tree_p(save_node);
tp = state->args->trans;
/*
* If the dying block has lower hashvals, then move all the
* elements in the remaining block up to make a hole.
*/
if ((be32_to_cpu(drop_btree[0].hashval) <
be32_to_cpu(save_btree[0].hashval)) ||
(be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
/* XXX: check this - is memmove dst correct? */
tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
sindex = 0;
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_btree[0],
(save_hdr.count + drop_hdr.count) *
sizeof(xfs_da_node_entry_t)));
} else {
sindex = save_hdr.count;
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
drop_hdr.count * sizeof(xfs_da_node_entry_t)));
}
/*
* Move all the B-tree elements from drop_blk to save_blk.
*/
tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
memcpy(&save_btree[sindex], &drop_btree[0], tmp);
save_hdr.count += drop_hdr.count;
dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
dp->d_ops->node_hdr_size));
/*
* Save the last hashval in the remaining block for upward propagation.
*/
save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
/*========================================================================
* Routines used for finding things in the Btree.
*========================================================================*/
/*
* Walk down the Btree looking for a particular filename, filling
* in the state structure as we go.
*
* We will set the state structure to point to each of the elements
* in each of the nodes where either the hashval is or should be.
*
* We support duplicate hashval's so for each entry in the current
* node that could contain the desired hashval, descend. This is a
* pruned depth-first tree search.
*/
int /* error */
xfs_da3_node_lookup_int(
struct xfs_da_state *state,
int *result)
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *curr;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_args *args;
xfs_dablk_t blkno;
xfs_dahash_t hashval;
xfs_dahash_t btreehashval;
int probe;
int span;
int max;
int error;
int retval;
struct xfs_inode *dp = state->args->dp;
args = state->args;
/*
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
*/
blkno = (args->whichfork == XFS_DATA_FORK) ? state->mp->m_dirleafblk : 0;
for (blk = &state->path.blk[0], state->path.active = 1;
state->path.active <= XFS_DA_NODE_MAXDEPTH;
blk++, state->path.active++) {
/*
* Read the next node down in the tree.
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, args->dp, blkno,
-1, &blk->bp, args->whichfork);
if (error) {
blk->blkno = 0;
state->path.active--;
return(error);
}
curr = blk->bp->b_addr;
blk->magic = be16_to_cpu(curr->magic);
if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
blk->magic == XFS_ATTR3_LEAF_MAGIC) {
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
blk->magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
blk->bp, NULL);
break;
}
blk->magic = XFS_DA_NODE_MAGIC;
/*
* Search an intermediate node for a match.
*/
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
max = nodehdr.count;
blk->hashval = be32_to_cpu(btree[max - 1].hashval);
/*
* Binary search. (note: small blocks will skip loop)
*/
probe = span = max / 2;
hashval = args->hashval;
while (span > 4) {
span /= 2;
btreehashval = be32_to_cpu(btree[probe].hashval);
if (btreehashval < hashval)
probe += span;
else if (btreehashval > hashval)
probe -= span;
else
break;
}
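/*
 * span is now at most 4, so probe sits within a few entries of the
 * target. Because duplicate hashvals are allowed, an exact binary hit
 * is not enough: the linear scans below reposition probe at the first
 * entry whose hashval is >= the search hashval.
 */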
ASSERT((probe >= 0) && (probe < max));
ASSERT((span <= 4) ||
(be32_to_cpu(btree[probe].hashval) == hashval));
/*
* Since we may have duplicate hashval's, find the first
* matching hashval in the node.
*/
while (probe > 0 &&
be32_to_cpu(btree[probe].hashval) >= hashval) {
probe--;
}
while (probe < max &&
be32_to_cpu(btree[probe].hashval) < hashval) {
probe++;
}
/*
* Pick the right block to descend on.
*/
if (probe == max) {
blk->index = max - 1;
blkno = be32_to_cpu(btree[max - 1].before);
} else {
blk->index = probe;
blkno = be32_to_cpu(btree[probe].before);
}
}
/*
* A leaf block that ends in the hashval that we are interested in
* (final hashval == search hashval) means that the next block may
* contain more entries with the same hashval, shift upward to the
* next leaf and keep searching.
*/
for (;;) {
if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
&blk->index, state);
} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
blk->index = args->index;
args->blkno = blk->blkno;
} else {
ASSERT(0);
return XFS_ERROR(EFSCORRUPTED);
}
if (((retval == ENOENT) || (retval == ENOATTR)) &&
(blk->hashval == args->hashval)) {
error = xfs_da3_path_shift(state, &state->path, 1, 1,
&retval);
if (error)
return(error);
if (retval == 0) {
continue;
} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
/* path_shift() gives ENOENT */
retval = XFS_ERROR(ENOATTR);
}
}
break;
}
*result = retval;
return(0);
}
/*========================================================================
* Utility routines.
*========================================================================*/
/*
* Compare two intermediate nodes for "order".
*/
STATIC int
xfs_da3_node_order(
struct xfs_inode *dp,
struct xfs_buf *node1_bp,
struct xfs_buf *node2_bp)
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da3_icnode_hdr node1hdr;
struct xfs_da3_icnode_hdr node2hdr;
node1 = node1_bp->b_addr;
node2 = node2_bp->b_addr;
dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
return 1;
}
return 0;
}
/*
* Link a new block into a doubly linked list of blocks (of whatever type).
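*
* Pointer sketch (descriptive only): when the new block sorts before
* the old one the chain becomes back <-> new <-> old; otherwise it
* becomes old <-> new <-> forw. The far-side neighbour, when present,
* is read in and its sibling pointer is redirected at the new block.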
*/
int /* error */
xfs_da3_blk_link(
struct xfs_da_state *state,
struct xfs_da_state_blk *old_blk,
struct xfs_da_state_blk *new_blk)
{
struct xfs_da_blkinfo *old_info;
struct xfs_da_blkinfo *new_info;
struct xfs_da_blkinfo *tmp_info;
struct xfs_da_args *args;
struct xfs_buf *bp;
int before = 0;
int error;
struct xfs_inode *dp = state->args->dp;
/*
* Set up environment.
*/
args = state->args;
ASSERT(args != NULL);
old_info = old_blk->bp->b_addr;
new_info = new_blk->bp->b_addr;
ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
old_blk->magic == XFS_ATTR_LEAF_MAGIC);
switch (old_blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
break;
case XFS_DIR2_LEAFN_MAGIC:
before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
break;
case XFS_DA_NODE_MAGIC:
before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
break;
}
/*
* Link blocks in appropriate order.
*/
if (before) {
/*
* Link new block in before existing block.
*/
trace_xfs_da_link_before(args);
new_info->forw = cpu_to_be32(old_blk->blkno);
new_info->back = old_info->back;
if (old_info->back) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->back),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == old_info->magic);
ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
tmp_info->forw = cpu_to_be32(new_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->back = cpu_to_be32(new_blk->blkno);
} else {
/*
* Link new block in after existing block.
*/
trace_xfs_da_link_after(args);
new_info->forw = old_info->forw;
new_info->back = cpu_to_be32(old_blk->blkno);
if (old_info->forw) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->forw),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == old_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
tmp_info->back = cpu_to_be32(new_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->forw = cpu_to_be32(new_blk->blkno);
}
xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
return(0);
}
/*
* Unlink a block from a doubly linked list of blocks.
*/
STATIC int /* error */
xfs_da3_blk_unlink(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk)
{
struct xfs_da_blkinfo *drop_info;
struct xfs_da_blkinfo *save_info;
struct xfs_da_blkinfo *tmp_info;
struct xfs_da_args *args;
struct xfs_buf *bp;
int error;
/*
* Set up environment.
*/
args = state->args;
ASSERT(args != NULL);
save_info = save_blk->bp->b_addr;
drop_info = drop_blk->bp->b_addr;
ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
save_blk->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(save_blk->magic == drop_blk->magic);
ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
(be32_to_cpu(save_info->back) == drop_blk->blkno));
ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
(be32_to_cpu(drop_info->back) == save_blk->blkno));
/*
* Unlink the leaf block from the doubly linked chain of leaves.
*/
if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
trace_xfs_da_unlink_back(args);
save_info->back = drop_info->back;
if (drop_info->back) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->back),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
tmp_info->forw = cpu_to_be32(save_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
}
} else {
trace_xfs_da_unlink_forward(args);
save_info->forw = drop_info->forw;
if (drop_info->forw) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->forw),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
tmp_info->back = cpu_to_be32(save_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
}
}
xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
	return 0;
}
/*
* Move a path "forward" or "!forward" one block at the current level.
*
* This routine will adjust a "path" to point to the next block
* "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
* Btree, including updating pointers to the intermediate nodes between
* the new bottom and the root.
*/
int /* error */
xfs_da3_path_shift(
struct xfs_da_state *state,
struct xfs_da_state_path *path,
int forward,
int release,
int *result)
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
struct xfs_da_intnode *node;
struct xfs_da_args *args;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
xfs_dablk_t blkno = 0;
int level;
int error;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_path_shift(state->args);
/*
* Roll up the Btree looking for the first block where our
* current index is not at the edge of the block. Note that
* we skip the bottom layer because we want the sibling block.
*/
args = state->args;
ASSERT(args != NULL);
ASSERT(path != NULL);
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
blkno = be32_to_cpu(btree[blk->index].before);
break;
} else if (!forward && (blk->index > 0)) {
blk->index--;
blkno = be32_to_cpu(btree[blk->index].before);
break;
}
}
if (level < 0) {
*result = XFS_ERROR(ENOENT); /* we're out of our tree */
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
		return 0;
}
/*
* Roll down the edge of the subtree until we reach the
* same depth we were at originally.
*/
for (blk++, level++; level < path->active; blk++, level++) {
/*
* Release the old block.
* (if it's dirty, trans won't actually let go)
*/
if (release)
xfs_trans_brelse(args->trans, blk->bp);
/*
* Read the next child block.
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, dp, blkno, -1,
&blk->bp, args->whichfork);
if (error)
			return error;
info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
/*
* Note: we flatten the magic number to a single type so we
* don't have to compare against crc/non-crc types elsewhere.
*/
switch (be16_to_cpu(info->magic)) {
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
node = (xfs_da_intnode_t *)info;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
else
blk->index = nodehdr.count - 1;
blkno = be32_to_cpu(btree[blk->index].before);
break;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
blk->magic = XFS_ATTR_LEAF_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
blk->magic = XFS_DIR2_LEAFN_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
blk->bp, NULL);
break;
default:
ASSERT(0);
break;
}
}
*result = 0;
return 0;
}
/*========================================================================
* Utility routines.
*========================================================================*/
/*
* Implement a simple hash on a character string.
* Rotate the hash value by 7 bits, then XOR each character in.
* This is implemented with some source-level loop unrolling.
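 *
 * For example, with namelen == 2 the loop is skipped and the result is
 * (name[0] << 7) ^ (name[1] << 0) ^ rol32(0, 14); hashing "ab" therefore
 * gives (0x61 << 7) ^ 0x62 = 0x3080 ^ 0x62 = 0x30e2.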
*/
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
xfs_dahash_t hash;
/*
* Do four characters at a time as long as we can.
*/
for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
(name[3] << 0) ^ rol32(hash, 7 * 4);
/*
* Now do the rest of the characters.
*/
switch (namelen) {
case 3:
return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
rol32(hash, 7 * 3);
case 2:
return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
case 1:
return (name[0] << 0) ^ rol32(hash, 7 * 1);
default: /* case 0: */
return hash;
}
}
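/*
 * Default name comparison: an exact, case-sensitive match or nothing.
 */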
enum xfs_dacmp
xfs_da_compname(
struct xfs_da_args *args,
const unsigned char *name,
int len)
{
return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
struct xfs_name *name)
{
return xfs_da_hashname(name->name, name->len);
}
const struct xfs_nameops xfs_default_nameops = {
.hashname = xfs_default_hashname,
.compname = xfs_da_compname
};
int
xfs_da_grow_inode_int(
struct xfs_da_args *args,
xfs_fileoff_t *bno,
int count)
{
struct xfs_trans *tp = args->trans;
struct xfs_inode *dp = args->dp;
int w = args->whichfork;
xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
struct xfs_bmbt_irec map, *mapp;
int nmap, error, got, i, mapi;
/*
* Find a spot in the file space to put the new block.
*/
error = xfs_bmap_first_unused(tp, dp, count, bno, w);
if (error)
return error;
/*
* Try mapping it in one filesystem block.
*/
nmap = 1;
ASSERT(args->firstblock != NULL);
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->flist);
if (error)
return error;
ASSERT(nmap <= 1);
if (nmap == 1) {
		mapp = &map;
mapi = 1;
} else if (nmap == 0 && count > 1) {
xfs_fileoff_t b;
int c;
/*
* If we didn't get it and the block might work if fragmented,
* try without the CONTIG flag. Loop until we get it all.
*/
mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
for (b = *bno, mapi = 0; b < *bno + count; ) {
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
c = (int)(*bno + count - b);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist);
if (error)
goto out_free_map;
if (nmap < 1)
break;
mapi += nmap;
b = mapp[mapi - 1].br_startoff +
mapp[mapi - 1].br_blockcount;
}
} else {
mapi = 0;
mapp = NULL;
}
/*
* Count the blocks we got, make sure it matches the total.
*/
for (i = 0, got = 0; i < mapi; i++)
got += mapp[i].br_blockcount;
if (got != count || mapp[0].br_startoff != *bno ||
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
*bno + count) {
error = XFS_ERROR(ENOSPC);
goto out_free_map;
}
/* account for newly allocated blocks in reserved blocks total */
args->total -= dp->i_d.di_nblocks - nblks;
out_free_map:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Add a block to the btree ahead of the file.
* Return the new block number to the caller.
*/
int
xfs_da_grow_inode(
struct xfs_da_args *args,
xfs_dablk_t *new_blkno)
{
xfs_fileoff_t bno;
int count;
int error;
trace_xfs_da_grow_inode(args);
if (args->whichfork == XFS_DATA_FORK) {
bno = args->dp->i_mount->m_dirleafblk;
count = args->dp->i_mount->m_dirblkfsbs;
} else {
bno = 0;
count = 1;
}
error = xfs_da_grow_inode_int(args, &bno, count);
if (!error)
*new_blkno = (xfs_dablk_t)bno;
return error;
}
/*
* Ick. We need to always be able to remove a btree block, even
* if there's no space reservation because the filesystem is full.
* This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
* It swaps the target block with the last block in the file. The
* last block in the file can always be removed since it can't cause
* a bmap btree split to do that.
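 *
 * Concretely: copy the last block's contents over the dead block, point
 * the copied block's siblings and its parent entry at the dead block's
 * number, then hand the now-redundant last block back to the caller to
 * unmap instead.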
*/
STATIC int
xfs_da3_swap_lastblock(
struct xfs_da_args *args,
xfs_dablk_t *dead_blknop,
struct xfs_buf **dead_bufp)
{
struct xfs_da_blkinfo *dead_info;
struct xfs_da_blkinfo *sib_info;
struct xfs_da_intnode *par_node;
struct xfs_da_intnode *dead_node;
struct xfs_dir2_leaf *dead_leaf2;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr par_hdr;
struct xfs_inode *dp;
struct xfs_trans *tp;
struct xfs_mount *mp;
struct xfs_buf *dead_buf;
struct xfs_buf *last_buf;
struct xfs_buf *sib_buf;
struct xfs_buf *par_buf;
xfs_dahash_t dead_hash;
xfs_fileoff_t lastoff;
xfs_dablk_t dead_blkno;
xfs_dablk_t last_blkno;
xfs_dablk_t sib_blkno;
xfs_dablk_t par_blkno;
int error;
int w;
int entno;
int level;
int dead_level;
trace_xfs_da_swap_lastblock(args);
dead_buf = *dead_bufp;
dead_blkno = *dead_blknop;
tp = args->trans;
dp = args->dp;
w = args->whichfork;
ASSERT(w == XFS_DATA_FORK);
mp = dp->i_mount;
lastoff = mp->m_dirfreeblk;
error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
if (unlikely(lastoff == 0)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
mp);
return XFS_ERROR(EFSCORRUPTED);
}
/*
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
if (error)
return error;
/*
* Copy the last block into the dead buffer and log it.
*/
memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
dead_info = dead_buf->b_addr;
/*
* Get values from the moved block.
*/
if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
ents = dp->d_ops->leaf_ents_p(dead_leaf2);
dead_level = 0;
dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
} else {
struct xfs_da3_icnode_hdr deadhdr;
dead_node = (xfs_da_intnode_t *)dead_info;
dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
btree = dp->d_ops->node_tree_p(dead_node);
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
sib_buf = par_buf = NULL;
/*
* If the moved block has a left sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->forw) != last_blkno ||
sib_info->magic != dead_info->magic)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
sib_info->forw = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
sizeof(sib_info->forw)));
sib_buf = NULL;
}
/*
* If the moved block has a right sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->back) != last_blkno ||
sib_info->magic != dead_info->magic)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
sib_info->back = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->back,
sizeof(sib_info->back)));
sib_buf = NULL;
}
par_blkno = mp->m_dirleafblk;
level = -1;
/*
* Walk down the tree looking for the parent of the moved block.
*/
for (;;) {
error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (level >= 0 && level != par_hdr.level + 1) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
level = par_hdr.level;
btree = dp->d_ops->node_tree_p(par_node);
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
entno++)
continue;
if (entno == par_hdr.count) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
par_blkno = be32_to_cpu(btree[entno].before);
if (level == dead_level + 1)
break;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
}
/*
* We're in the right parent block.
* Look for the right entry.
*/
for (;;) {
for (;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].before) != last_blkno;
entno++)
continue;
if (entno < par_hdr.count)
break;
par_blkno = par_hdr.forw;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
if (unlikely(par_blkno == 0)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (par_hdr.level != level) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
btree = dp->d_ops->node_tree_p(par_node);
entno = 0;
}
/*
* Update the parent entry pointing to the moved block.
*/
btree[entno].before = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, par_buf,
XFS_DA_LOGRANGE(par_node, &btree[entno].before,
sizeof(btree[entno].before)));
*dead_blknop = last_blkno;
*dead_bufp = last_buf;
return 0;
done:
if (par_buf)
xfs_trans_brelse(tp, par_buf);
if (sib_buf)
xfs_trans_brelse(tp, sib_buf);
xfs_trans_brelse(tp, last_buf);
return error;
}
/*
* Remove a btree block from a directory or attribute.
*/
int
xfs_da_shrink_inode(
xfs_da_args_t *args,
xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf)
{
xfs_inode_t *dp;
int done, error, w, count;
xfs_trans_t *tp;
xfs_mount_t *mp;
trace_xfs_da_shrink_inode(args);
dp = args->dp;
w = args->whichfork;
tp = args->trans;
mp = dp->i_mount;
if (w == XFS_DATA_FORK)
count = mp->m_dirblkfsbs;
else
count = 1;
for (;;) {
/*
* Remove extents. If we get ENOSPC for a dir we have to move
* the last block to the place we want to kill.
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
0, args->firstblock, args->flist, &done);
if (error == ENOSPC) {
if (w != XFS_DATA_FORK)
break;
error = xfs_da3_swap_lastblock(args, &dead_blkno,
&dead_buf);
if (error)
break;
} else {
break;
}
}
xfs_trans_binval(tp, dead_buf);
return error;
}
/*
* See if the mapping(s) for this btree block are valid, i.e.
* don't contain holes, are logically contiguous, and cover the whole range.
*/
STATIC int
xfs_da_map_covers_blocks(
int nmap,
xfs_bmbt_irec_t *mapp,
xfs_dablk_t bno,
int count)
{
int i;
xfs_fileoff_t off;
for (i = 0, off = bno; i < nmap; i++) {
if (mapp[i].br_startblock == HOLESTARTBLOCK ||
mapp[i].br_startblock == DELAYSTARTBLOCK) {
return 0;
}
if (off != mapp[i].br_startoff) {
return 0;
}
off += mapp[i].br_blockcount;
}
return off == bno + count;
}
/*
* Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
*
* For the single map case, it is assumed that the caller has provided a pointer
* to a valid xfs_buf_map. For the multiple map case, this function will
* allocate the xfs_buf_map to hold all the maps and replace the caller's single
* map pointer with the allocated map.
*/
static int
xfs_buf_map_from_irec(
struct xfs_mount *mp,
struct xfs_buf_map **mapp,
int *nmaps,
struct xfs_bmbt_irec *irecs,
int nirecs)
{
struct xfs_buf_map *map;
int i;
ASSERT(*nmaps == 1);
ASSERT(nirecs >= 1);
if (nirecs > 1) {
map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
KM_SLEEP | KM_NOFS);
if (!map)
return ENOMEM;
*mapp = map;
}
*nmaps = nirecs;
map = *mapp;
for (i = 0; i < *nmaps; i++) {
ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
irecs[i].br_startblock != HOLESTARTBLOCK);
map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
}
return 0;
}
/*
* Map the block we are given ready for reading. There are three possible return
* values:
* -1 - will be returned if we land in a hole and mappedbno == -2 so the
* caller knows not to execute a subsequent read.
* 0 - if we mapped the block successfully
* >0 - positive error number if there was an error.
*/
static int
xfs_dabuf_map(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
int whichfork,
struct xfs_buf_map **map,
int *nmaps)
{
struct xfs_mount *mp = dp->i_mount;
int nfsb;
int error = 0;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec *irecs = &irec;
int nirecs;
ASSERT(map && *map);
ASSERT(*nmaps == 1);
nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
/*
* Caller doesn't have a mapping. -2 means don't complain
* if we land in a hole.
*/
if (mappedbno == -1 || mappedbno == -2) {
/*
* Optimize the one-block case.
*/
if (nfsb != 1)
irecs = kmem_zalloc(sizeof(irec) * nfsb,
KM_SLEEP | KM_NOFS);
nirecs = nfsb;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
&nirecs, xfs_bmapi_aflag(whichfork));
if (error)
goto out;
} else {
irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
irecs->br_startoff = (xfs_fileoff_t)bno;
irecs->br_blockcount = nfsb;
irecs->br_state = 0;
nirecs = 1;
}
if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
int i;
xfs_alert(mp, "%s: bno %lld dir: inode %lld",
__func__, (long long)bno,
(long long)dp->i_ino);
				for (i = 0; i < nirecs; i++) {
xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
i,
(long long)irecs[i].br_startoff,
(long long)irecs[i].br_startblock,
(long long)irecs[i].br_blockcount,
irecs[i].br_state);
}
}
XFS_ERROR_REPORT("xfs_da_do_buf(1)",
XFS_ERRLEVEL_LOW, mp);
}
goto out;
}
error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
if (irecs != &irec)
kmem_free(irecs);
return error;
}
/*
* Get a buffer for the dir/attr block.
*/
int
xfs_da_get_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork)
{
struct xfs_buf *bp;
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
*bpp = NULL;
	mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
mapp, nmap, 0);
error = bp ? bp->b_error : XFS_ERROR(EIO);
if (error) {
xfs_trans_brelse(trans, bp);
goto out_free;
}
*bpp = bp;
out_free:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Get a buffer for the dir/attr block, fill in the contents.
*/
int
xfs_da_read_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
*bpp = NULL;
	mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
error = xfs_trans_read_buf_map(dp->i_mount, trans,
dp->i_mount->m_ddev_targp,
mapp, nmap, 0, &bp, ops);
if (error)
goto out_free;
if (whichfork == XFS_ATTR_FORK)
xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
else
xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
/*
* This verification code will be moved to a CRC verification callback
* function so just leave it here unchanged until then.
*/
{
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
xfs_dir2_free_t *free = bp->b_addr;
xfs_da_blkinfo_t *info = bp->b_addr;
uint magic, magic1;
struct xfs_mount *mp = dp->i_mount;
magic = be16_to_cpu(info->magic);
magic1 = be32_to_cpu(hdr->magic);
if (unlikely(
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
(magic != XFS_DA3_NODE_MAGIC) &&
(magic != XFS_ATTR_LEAF_MAGIC) &&
(magic != XFS_ATTR3_LEAF_MAGIC) &&
(magic != XFS_DIR2_LEAF1_MAGIC) &&
(magic != XFS_DIR3_LEAF1_MAGIC) &&
(magic != XFS_DIR2_LEAFN_MAGIC) &&
(magic != XFS_DIR3_LEAFN_MAGIC) &&
(magic1 != XFS_DIR2_BLOCK_MAGIC) &&
(magic1 != XFS_DIR3_BLOCK_MAGIC) &&
(magic1 != XFS_DIR2_DATA_MAGIC) &&
(magic1 != XFS_DIR3_DATA_MAGIC) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
trace_xfs_da_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
xfs_trans_brelse(trans, bp);
goto out_free;
}
}
*bpp = bp;
out_free:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Readahead the dir/attr block.
*/
xfs_daddr_t
xfs_da_reada_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
int whichfork,
const struct xfs_buf_ops *ops)
{
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
	mapp = &map;
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
mappedbno = mapp[0].bm_bn;
xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
out_free:
if (mapp != &map)
kmem_free(mapp);
if (error)
return -1;
return mappedbno;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2285_0 |
crossvul-cpp_data_bad_5545_0 | /*
* Back-end of the driver for virtual network devices. This portion of the
* driver exports a 'unified' network-device interface that can be accessed
* by any operating system that implements a compatible front end. A
* reference front-end implementation can be found in:
* drivers/net/xen-netfront.c
*
* Copyright (c) 2002-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
struct pending_tx_info {
struct xen_netif_tx_request req;
struct xenvif *vif;
};
typedef unsigned int pending_ring_idx_t;
struct netbk_rx_meta {
int id;
int size;
int gso_size;
};
#define MAX_PENDING_REQS 256
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
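/*
 * On 32-bit the (group, idx) pair must be packed into the pointer-sized
 * ->mapping field, so idx gets 8 bits (enough for MAX_PENDING_REQS == 256
 * slots) and the group number takes the remaining bits; on 64-bit both
 * get a full int.
 */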
union page_ext {
struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH 8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
unsigned int group:GROUP_WIDTH;
unsigned int idx:IDX_WIDTH;
#else
unsigned int group, idx;
#endif
} e;
void *mapping;
};
struct xen_netbk {
wait_queue_head_t wq;
struct task_struct *task;
struct sk_buff_head rx_queue;
struct sk_buff_head tx_queue;
struct timer_list net_timer;
struct page *mmap_pages[MAX_PENDING_REQS];
pending_ring_idx_t pending_prod;
pending_ring_idx_t pending_cons;
struct list_head net_schedule_list;
/* Protect the net_schedule_list in netif. */
spinlock_t net_schedule_list_lock;
atomic_t netfront_count;
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
u16 pending_ring[MAX_PENDING_REQS];
/*
* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
* head/fragment page uses 2 copy operations because it
* straddles two buffers in the frontend.
*/
struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
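/*
 * Attach a new frontend to the group currently serving the fewest
 * frontends, so vifs spread evenly across the backend worker threads.
 */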
void xen_netbk_add_xenvif(struct xenvif *vif)
{
int i;
int min_netfront_count;
int min_group = 0;
struct xen_netbk *netbk;
min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
for (i = 0; i < xen_netbk_group_nr; i++) {
int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
if (netfront_count < min_netfront_count) {
min_group = i;
min_netfront_count = netfront_count;
}
}
netbk = &xen_netbk[min_group];
vif->netbk = netbk;
atomic_inc(&netbk->netfront_count);
}
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
struct xen_netbk *netbk = vif->netbk;
vif->netbk = NULL;
atomic_dec(&netbk->netfront_count);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
				  u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
u16 idx)
{
return page_to_pfn(netbk->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
unsigned int idx)
{
unsigned int group = netbk - xen_netbk;
union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
pg->mapping = ext.mapping;
}
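/*
 * Reverse of set_page_ext(). The stored group is biased by one, so an
 * untagged page (NULL ->mapping) decodes to (unsigned)-1 here and is
 * rejected by the range check below rather than being mistaken for
 * backend group 0.
 */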
static int get_page_ext(struct page *pg,
unsigned int *pgroup, unsigned int *pidx)
{
union page_ext ext = { .mapping = pg->mapping };
struct xen_netbk *netbk;
unsigned int group, idx;
group = ext.e.group - 1;
if (group < 0 || group >= xen_netbk_group_nr)
return 0;
netbk = &xen_netbk[group];
idx = ext.e.idx;
if ((idx < 0) || (idx >= MAX_PENDING_REQS))
return 0;
if (netbk->mmap_pages[idx] != pg)
return 0;
*pgroup = group;
*pidx = idx;
return 1;
}
/*
* This is the amount of packet we copy rather than map, so that the
* guest can't fiddle with the contents of the headers while we do
* packet processing on them (netfilter, routing, etc).
*/
#define PKT_PROT_LEN (ETH_HLEN + \
VLAN_HLEN + \
sizeof(struct iphdr) + MAX_IPOPTLEN + \
sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
return (u16)frag->page_offset;
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
frag->page_offset = pending_idx;
}
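/*
 * MAX_PENDING_REQS is a power of two, so masking a free-running counter
 * with MAX_PENDING_REQS - 1 always yields a valid pending_ring[] index.
 */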
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
}
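/*
 * pending_ring[] is a ring of free pending indices: pending_cons counts
 * entries handed out and pending_prod entries returned, so the number of
 * requests in flight is MAX_PENDING_REQS minus the free count.
 */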
static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
return MAX_PENDING_REQS -
netbk->pending_prod + netbk->pending_cons;
}
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
wake_up(&netbk->wq);
}
static int max_required_rx_slots(struct xenvif *vif)
{
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
if (vif->can_sg || vif->gso || vif->gso_prefix)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
return max;
}
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
RING_IDX peek = vif->rx_req_cons_peek;
RING_IDX needed = max_required_rx_slots(vif);
return ((vif->rx.sring->req_prod - peek) < needed) ||
((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
if (!xen_netbk_rx_ring_full(vif))
return 0;
vif->rx.sring->req_event = vif->rx_req_cons_peek +
max_required_rx_slots(vif);
mb(); /* request notification /then/ check the queue */
return xen_netbk_rx_ring_full(vif);
}
/*
* Returns true if we should start a new receive buffer instead of
* adding 'size' bytes to a buffer which currently contains 'offset'
* bytes.
*/
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
return true;
/*
* complex case: start a fresh buffer if the current frag
* would overflow the current buffer but only if:
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
*
* Where:
* - (i) stops us splitting a frag into two copies
* unless the frag is too large for a single buffer.
* - (ii) stops us from leaving a buffer pointlessly empty.
* - (iii) stops us leaving the first buffer
* empty. Strictly speaking this is already covered
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
*
	 * This means we will effectively linearise small
	 * frags, but will not needlessly split large buffers
	 * into multiple copies, tending to give large frags
	 * their own buffers as before.
*/
if ((offset + size > MAX_BUFFER_OFFSET) &&
(size <= MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
}
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
* netbk_gop_frag_copy.
*/
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
unsigned int count;
int i, copy_off;
count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
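	/*
	 * This estimate has to track the decisions netbk_gop_frag_copy()
	 * will make for the same skb; if it undercounts, the ring-space
	 * check in xen_netbk_rx_ring_full() can be too optimistic.
	 */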
if (skb_shinfo(skb)->gso_size)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
unsigned long bytes;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(copy_off, bytes, 0)) {
count++;
copy_off = 0;
}
if (copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - copy_off;
copy_off += bytes;
offset += bytes;
size -= bytes;
if (offset == PAGE_SIZE)
offset = 0;
}
}
return count;
}
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
struct gnttab_copy *copy;
struct netbk_rx_meta *meta;
int copy_off;
grant_ref_t copy_gref;
};
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
struct netrx_pending_operations *npo)
{
struct netbk_rx_meta *meta;
struct xen_netif_rx_request *req;
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
return meta;
}
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
* These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
int foreign = get_page_ext(page, &group, &idx);
unsigned long bytes;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
meta = npo->meta + npo->meta_prod - 1;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
/*
* Netfront requires there to be some data in the head
* buffer.
*/
BUG_ON(*head);
meta = get_next_rx_buffer(vif, npo);
}
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
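		/*
		 * A page tagged by set_page_ext() is still covered by some
		 * guest's TX grant, so copy grant-to-grant; anything else is
		 * ordinary backend memory, addressed by its MFN.
		 */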
if (foreign) {
struct xen_netbk *netbk = &xen_netbk[group];
struct pending_tx_info *src_pend;
src_pend = &netbk->pending_tx_info[idx];
copy_gop->source.domid = src_pend->vif->domid;
copy_gop->source.u.ref = src_pend->req.gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
void *vaddr = page_address(page);
copy_gop->source.domid = DOMID_SELF;
copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
}
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
copy_gop->len = bytes;
npo->copy_off += bytes;
meta->size += bytes;
offset += bytes;
size -= bytes;
/* Next frame */
if (offset == PAGE_SIZE && size) {
BUG_ON(!PageCompound(page));
page++;
offset = 0;
}
/* Leave a gap for the GSO descriptor. */
if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
}
}
/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
* structures, etc.
*
* It returns the number of meta structures consumed. The number of
* ring slots used is always equal to the number of meta slots used
* plus the number of GSO descriptors used. Currently, we use either
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
* frontend-side LRO).
*/
static int netbk_gop_skb(struct sk_buff *skb,
struct netrx_pending_operations *npo)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
struct netbk_rx_meta *meta;
unsigned char *data;
int head = 1;
int old_meta_prod;
old_meta_prod = npo->meta_prod;
/* Set up a GSO prefix descriptor, if necessary */
if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
if (!vif->gso_prefix)
meta->gso_size = skb_shinfo(skb)->gso_size;
else
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
unsigned int len = PAGE_SIZE - offset;
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
netbk_gop_frag_copy(vif, skb, npo,
virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
return npo->meta_prod - old_meta_prod;
}
/*
* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
* used to set up the operations on the top of
* netrx_pending_operations, which have since been done. Check that
* they didn't give any errors and advance over them.
*/
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
struct netrx_pending_operations *npo)
{
struct gnttab_copy *copy_op;
int status = XEN_NETIF_RSP_OKAY;
int i;
for (i = 0; i < nr_meta_slots; i++) {
copy_op = npo->copy + npo->copy_cons++;
if (copy_op->status != GNTST_okay) {
netdev_dbg(vif->dev,
"Bad status %d from copy to DOM%d.\n",
copy_op->status, vif->domid);
status = XEN_NETIF_RSP_ERROR;
}
}
return status;
}
static void netbk_add_frag_responses(struct xenvif *vif, int status,
struct netbk_rx_meta *meta,
int nr_meta_slots)
{
int i;
unsigned long offset;
/* No fragments used */
if (nr_meta_slots <= 1)
return;
nr_meta_slots--;
for (i = 0; i < nr_meta_slots; i++) {
int flags;
if (i == nr_meta_slots - 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
offset = 0;
make_rx_response(vif, meta[i].id, status, offset,
meta[i].size, flags);
}
}
struct skb_cb_overlay {
int meta_slots_used;
};
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL, *tmp;
s8 status;
u16 irq, flags;
struct xen_netif_rx_response *resp;
struct sk_buff_head rxq;
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
int nr_frags;
int count;
unsigned long offset;
struct skb_cb_overlay *sco;
struct netrx_pending_operations npo = {
.copy = netbk->grant_copy_op,
.meta = netbk->meta,
};
skb_queue_head_init(&rxq);
count = 0;
while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
vif = netdev_priv(skb->dev);
nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = netbk_gop_skb(skb, &npo);
count += nr_frags + 1;
__skb_queue_tail(&rxq, skb);
/* Filled the batch queue? */
if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
break;
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
if (!npo.copy_prod)
return;
BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
vif = netdev_priv(skb->dev);
if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
resp = RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
resp->offset = netbk->meta[npo.meta_cons].gso_size;
resp->id = netbk->meta[npo.meta_cons].id;
resp->status = sco->meta_slots_used;
npo.meta_cons++;
sco->meta_slots_used--;
}
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
if (sco->meta_slots_used == 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
flags |= XEN_NETRXF_data_validated;
offset = 0;
resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
status, offset,
netbk->meta[npo.meta_cons].size,
flags);
if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
gso->flags = 0;
}
netbk_add_frag_responses(vif, status,
netbk->meta + npo.meta_cons + 1,
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
irq = vif->irq;
if (ret && list_empty(&vif->notify_list))
			list_add_tail(&vif->notify_list, &notify);
xenvif_notify_tx_completion(vif);
xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
notify_remote_via_irq(vif->irq);
list_del_init(&vif->notify_list);
}
/* More work to do? */
if (!skb_queue_empty(&netbk->rx_queue) &&
!timer_pending(&netbk->net_timer))
xen_netbk_kick_thread(netbk);
}
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
struct xen_netbk *netbk = vif->netbk;
skb_queue_tail(&netbk->rx_queue, skb);
xen_netbk_kick_thread(netbk);
}
static void xen_netbk_alarm(unsigned long data)
{
struct xen_netbk *netbk = (struct xen_netbk *)data;
xen_netbk_kick_thread(netbk);
}
static int __on_net_schedule_list(struct xenvif *vif)
{
return !list_empty(&vif->schedule_list);
}
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
if (likely(__on_net_schedule_list(vif))) {
list_del_init(&vif->schedule_list);
xenvif_put(vif);
}
}
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL;
spin_lock_irq(&netbk->net_schedule_list_lock);
if (list_empty(&netbk->net_schedule_list))
goto out;
vif = list_first_entry(&netbk->net_schedule_list,
struct xenvif, schedule_list);
if (!vif)
goto out;
xenvif_get(vif);
remove_from_net_schedule_list(vif);
out:
spin_unlock_irq(&netbk->net_schedule_list_lock);
return vif;
}
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
unsigned long flags;
struct xen_netbk *netbk = vif->netbk;
if (__on_net_schedule_list(vif))
goto kick;
spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
if (!__on_net_schedule_list(vif) &&
likely(xenvif_schedulable(vif))) {
list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
xenvif_get(vif);
}
spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
kick:
smp_mb();
if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
!list_empty(&netbk->net_schedule_list))
xen_netbk_kick_thread(netbk);
}
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
struct xen_netbk *netbk = vif->netbk;
spin_lock_irq(&netbk->net_schedule_list_lock);
remove_from_net_schedule_list(vif);
spin_unlock_irq(&netbk->net_schedule_list_lock);
}
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
int more_to_do;
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
if (more_to_do)
xen_netbk_schedule_xenvif(vif);
}
static void tx_add_credit(struct xenvif *vif)
{
unsigned long max_burst, max_credit;
/*
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
max_burst = max(max_burst, vif->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = vif->remaining_credit + vif->credit_bytes;
if (max_credit < vif->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
vif->remaining_credit = min(max_credit, max_burst);
}
static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
xen_netbk_check_rx_xenvif(vif);
}
static void netbk_tx_err(struct xenvif *vif,
struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
if (cons >= end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
vif->tx.req_cons = cons;
xen_netbk_check_rx_xenvif(vif);
xenvif_put(vif);
}
static void netbk_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
xenvif_put(vif);
}
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
int frags = 0;
if (!(first->flags & XEN_NETTXF_more_data))
return 0;
do {
if (frags >= work_to_do) {
netdev_err(vif->dev, "Need more frags\n");
netbk_fatal_tx_err(vif);
			/* Error returns must be negative; frags can be 0 here. */
			return -ENODATA;
}
if (unlikely(frags >= MAX_SKB_FRAGS)) {
netdev_err(vif->dev, "Too many frags\n");
netbk_fatal_tx_err(vif);
			return -E2BIG;
}
memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
netdev_err(vif->dev, "Frag is bigger than frame.\n");
netbk_fatal_tx_err(vif);
			return -EIO;
}
first->size -= txp->size;
frags++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
netbk_fatal_tx_err(vif);
			return -EINVAL;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
return frags;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
struct sk_buff *skb,
u16 pending_idx)
{
struct page *page;
page = alloc_page(GFP_KERNEL|__GFP_COLD);
if (!page)
return NULL;
set_page_ext(page, netbk, pending_idx);
netbk->mmap_pages[pending_idx] = page;
return page;
}
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_copy *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = *((u16 *)skb->data);
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < shinfo->nr_frags; i++, txp++) {
struct page *page;
pending_ring_idx_t index;
struct pending_tx_info *pending_tx_info =
netbk->pending_tx_info;
index = pending_index(netbk->pending_cons++);
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
		if (!page)
			goto err;
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txp->offset;
gop->len = txp->size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
xenvif_get(vif);
pending_tx_info[pending_idx].vif = vif;
frag_set_pending_idx(&frags[i], pending_idx);
}
	return gop;

err:
	/*
	 * Unwind after a page allocation failure: free the pages and drop
	 * the vif references taken for fragments already set up, and hand
	 * the consumed pending slots back. Nothing has repopulated the free
	 * ring meanwhile (each netbk runs single-threaded), so rewinding
	 * pending_cons restores exactly the indices consumed above. Error
	 * responses are left to netbk_tx_err() in the caller.
	 */
	netbk->pending_cons--;		/* the slot that just failed */
	while (i-- > start) {
		u16 idx = frag_get_pending_idx(&frags[i]);

		netbk->mmap_pages[idx]->mapping = 0;
		put_page(netbk->mmap_pages[idx]);
		netbk->mmap_pages[idx] = NULL;
		xenvif_put(vif);
		netbk->pending_cons--;
	}
	return NULL;
}
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
				  struct sk_buff *skb,
				  struct gnttab_copy **gopp)
{
	struct gnttab_copy *gop = *gopp;
	u16 pending_idx = *((u16 *)skb->data);
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	int i, err, start;

	/*
	 * Check status of header. On error, release the slot and its page;
	 * responding without freeing the page would leak it.
	 */
	err = gop->status;
	if (unlikely(err))
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

	/* Skip first skb fragment if it is on same page as header fragment. */
	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);

	for (i = start; i < nr_frags; i++) {
		int j, newerr;

		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);

		/* Check error status: if okay then remember grant handle. */
		newerr = (++gop)->status;
		if (likely(!newerr)) {
			/* Had a previous error? Invalidate this fragment. */
			if (unlikely(err))
				xen_netbk_idx_release(netbk, pending_idx,
						      XEN_NETIF_RSP_OKAY);
			continue;
		}

		/* Error on this fragment: respond to client with an error. */
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);

		/* Not the first error? Preceding frags already invalidated. */
		if (err)
			continue;

		/* First error: invalidate header and preceding fragments. */
		pending_idx = *((u16 *)skb->data);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
		for (j = start; j < i; j++) {
			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
			xen_netbk_idx_release(netbk, pending_idx,
					      XEN_NETIF_RSP_OKAY);
		}

		/* Remember the error: invalidate all subsequent fragments. */
		err = newerr;
	}

	*gopp = gop + 1;
	return err;
}
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
struct page *page;
u16 pending_idx;
pending_idx = frag_get_pending_idx(frag);
txp = &netbk->pending_tx_info[pending_idx].req;
page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
		xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
static int xen_netbk_get_extras(struct xenvif *vif,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = vif->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_err(vif->dev, "Missing extra info\n");
netbk_fatal_tx_err(vif);
return -EBADR;
}
memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
vif->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
}
static int netbk_set_skb_gso(struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_err(vif->dev, "GSO size must not be zero.\n");
netbk_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
int recalculate_partial_csum = 0;
/*
* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
* frame. In this case force the SKB to CHECKSUM_PARTIAL and
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
vif->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = 1;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
iph = (void *)skb->data;
th = skb->data + 4 * iph->ihl;
if (th >= skb_tail_pointer(skb))
goto out;
skb->csum_start = th - skb->head;
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
if (recalculate_partial_csum) {
struct tcphdr *tcph = (struct tcphdr *)th;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
if (recalculate_partial_csum) {
struct udphdr *udph = (struct udphdr *)th;
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
"Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
goto out;
err = 0;
out:
return err;
}
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
unsigned long now = jiffies;
unsigned long next_credit =
vif->credit_timeout.expires +
msecs_to_jiffies(vif->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
if (timer_pending(&vif->credit_timeout))
return true;
/* Passed the point where we can replenish credit? */
if (time_after_eq(now, next_credit)) {
vif->credit_timeout.expires = now;
tx_add_credit(vif);
}
/* Still too big to send right now? Set a callback. */
if (size > vif->remaining_credit) {
vif->credit_timeout.data =
(unsigned long)vif;
vif->credit_timeout.function =
tx_credit_callback;
mod_timer(&vif->credit_timeout,
next_credit);
return true;
}
return false;
}
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
pending_ring_idx_t index;
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
/* This can sometimes happen because the test of
* list_empty(net_schedule_list) at the top of the
* loop is unlocked. Just go back and have another
* look.
*/
if (!vif)
continue;
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
netbk_fatal_tx_err(vif);
continue;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
continue;
}
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
tx_credit_exceeded(vif, txreq.size)) {
xenvif_put(vif);
continue;
}
vif->remaining_credit -= txreq.size;
work_to_do--;
vif->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
continue;
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
continue;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
netbk_tx_err(vif, &txreq, idx);
continue;
}
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
netbk_fatal_tx_err(vif);
continue;
}
index = pending_index(netbk->pending_cons);
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < MAX_SKB_FRAGS) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
netbk_tx_err(vif, &txreq, idx);
break;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
/* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
continue;
}
}
/* XXX could copy straight to head */
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txreq.offset;
gop->len = txreq.size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&netbk->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
netbk->pending_tx_info[pending_idx].vif = vif;
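		/*
		 * Stash the pending index at the front of the skb data area;
		 * nothing has been copied there yet, and xen_netbk_tx_submit()
		 * reads it back before overwriting it with the packet bytes.
		 */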
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
}
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
skb, txfrags, gop);
		if (request_gop == NULL) {
			/*
			 * xen_netbk_get_requests() unwound the fragments;
			 * drop the page and pending slot taken above for the
			 * linear header as well, then let netbk_tx_err()
			 * generate the error responses.
			 */
			netbk->mmap_pages[pending_idx]->mapping = 0;
			put_page(netbk->mmap_pages[pending_idx]);
			netbk->mmap_pages[pending_idx] = NULL;
			netbk->pending_cons--;
			skb_shinfo(skb)->nr_frags = 0;
			kfree_skb(skb);
			netbk_tx_err(vif, &txreq, idx);
			continue;
		}
gop = request_gop;
__skb_queue_tail(&netbk->tx_queue, skb);
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
break;
}
return gop - netbk->tx_copy_ops;
}
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops;
struct sk_buff *skb;
while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
struct xenvif *vif;
u16 pending_idx;
unsigned data_len;
pending_idx = *((u16 *)skb->data);
vif = netbk->pending_tx_info[pending_idx].vif;
txp = &netbk->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
netdev_dbg(vif->dev, "netback grant failed.\n");
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
memcpy(skb->data,
(void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
data_len);
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
xen_netbk_idx_release(netbk, pending_idx);
}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
xen_netbk_fill_frags(netbk, skb);
/*
* If the initial fragment was < PKT_PROT_LEN then
* pull through some bytes from the other fragments to
* increase the linear region to PKT_PROT_LEN bytes.
*/
if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
skb->dev = vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
"Can't setup checksum in net_tx_action\n");
kfree_skb(skb);
continue;
}
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
xenvif_receive_skb(vif, skb);
}
}
/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
unsigned nr_gops;
nr_gops = xen_netbk_tx_build_gops(netbk);
if (nr_gops == 0)
return;
gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
/* Already complete? */
if (netbk->mmap_pages[pending_idx] == NULL)
return;
pending_tx_info = &netbk->pending_tx_info[pending_idx];
vif = pending_tx_info->vif;
make_tx_response(vif, &pending_tx_info->req, XEN_NETIF_RSP_OKAY);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
netbk->mmap_pages[pending_idx]->mapping = 0;
put_page(netbk->mmap_pages[pending_idx]);
netbk->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st)
{
RING_IDX i = vif->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
int notify;
resp = RING_GET_RESPONSE(&vif->tx, i);
resp->id = txp->id;
resp->status = st;
if (txp->flags & XEN_NETTXF_extra_info)
RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
vif->tx.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
if (notify)
notify_remote_via_irq(vif->irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
RING_IDX i = vif->rx.rsp_prod_pvt;
struct xen_netif_rx_response *resp;
resp = RING_GET_RESPONSE(&vif->rx, i);
resp->offset = offset;
resp->flags = flags;
resp->id = id;
resp->status = (s16)size;
if (st < 0)
resp->status = (s16)st;
vif->rx.rsp_prod_pvt = ++i;
return resp;
}
static inline int rx_work_todo(struct xen_netbk *netbk)
{
return !skb_queue_empty(&netbk->rx_queue);
}
static inline int tx_work_todo(struct xen_netbk *netbk)
{
if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list))
return 1;
return 0;
}
static int xen_netbk_kthread(void *data)
{
struct xen_netbk *netbk = data;
while (!kthread_should_stop()) {
wait_event_interruptible(netbk->wq,
rx_work_todo(netbk) ||
tx_work_todo(netbk) ||
kthread_should_stop());
cond_resched();
if (kthread_should_stop())
break;
if (rx_work_todo(netbk))
xen_netbk_rx_action(netbk);
if (tx_work_todo(netbk))
xen_netbk_tx_action(netbk);
}
return 0;
}
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->tx.sring);
if (vif->rx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
tx_ring_ref, &addr);
if (err)
goto err;
txs = (struct xen_netif_tx_sring *)addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
rx_ring_ref, &addr);
if (err)
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
vif->rx_req_cons_peek = 0;
return 0;
err:
xen_netbk_unmap_frontend_rings(vif);
return err;
}
static int __init netback_init(void)
{
int i;
int rc = 0;
int group;
if (!xen_domain())
return -ENODEV;
xen_netbk_group_nr = num_online_cpus();
xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
if (!xen_netbk)
return -ENOMEM;
for (group = 0; group < xen_netbk_group_nr; group++) {
struct xen_netbk *netbk = &xen_netbk[group];
skb_queue_head_init(&netbk->rx_queue);
skb_queue_head_init(&netbk->tx_queue);
init_timer(&netbk->net_timer);
netbk->net_timer.data = (unsigned long)netbk;
netbk->net_timer.function = xen_netbk_alarm;
netbk->pending_cons = 0;
netbk->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; i++)
netbk->pending_ring[i] = i;
init_waitqueue_head(&netbk->wq);
netbk->task = kthread_create(xen_netbk_kthread,
(void *)netbk,
"netback/%u", group);
if (IS_ERR(netbk->task)) {
printk(KERN_ALERT "kthread_create() failed at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
}
kthread_bind(netbk->task, group);
INIT_LIST_HEAD(&netbk->net_schedule_list);
spin_lock_init(&netbk->net_schedule_list_lock);
atomic_set(&netbk->netfront_count, 0);
wake_up_process(netbk->task);
}
rc = xenvif_xenbus_init();
if (rc)
goto failed_init;
return 0;
failed_init:
while (--group >= 0) {
struct xen_netbk *netbk = &xen_netbk[group];
for (i = 0; i < MAX_PENDING_REQS; i++) {
if (netbk->mmap_pages[i])
__free_page(netbk->mmap_pages[i]);
}
del_timer(&netbk->net_timer);
kthread_stop(netbk->task);
}
vfree(xen_netbk);
return rc;
}
module_init(netback_init);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5545_0 |
crossvul-cpp_data_good_3486_7 | /*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */
/* These are for 32bit counters. For 64bit ones, define them accordingly. */
#define MAX_PERIOD ((1ULL << 32) - 1)
#define VALID_COUNT 0x7fffffff
#define TOTAL_BITS 32
#define HIGHEST_BIT 31
#define MIPS_MAX_HWEVENTS 4
struct cpu_hw_events {
/* Array of events on this cpu. */
struct perf_event *events[MIPS_MAX_HWEVENTS];
/*
* Set the bit (indexed by the counter number) when the counter
* is used for an event.
*/
unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* The borrowed MSB for the performance counter. A MIPS performance
* counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
* counters) to determine whether a counter overflow should be
* signaled. So here we keep a separate software MSB for each
* counter to make things easy.
*/
unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
/*
* Software copy of the control register for each performance counter.
* MIPS CPUs vary in their performance counters; they use this field
* differently, and some may not use it at all.
*/
unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.saved_ctrl = {0},
};
/* The description of MIPS performance events. */
struct mips_perf_event {
unsigned int event_id;
/*
* MIPS performance counters are indexed starting from 0.
* CNTR_EVEN indicates the indexes of the counters to be used are
* even numbers.
*/
unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
enum {
T = 0,
V = 1,
P = 2,
} range;
#else
#define T
#define V
#define P
#endif
};
static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);
#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x
struct mips_pmu {
const char *name;
int irq;
irqreturn_t (*handle_irq)(int irq, void *dev);
int (*handle_shared_irq)(void);
void (*start)(void);
void (*stop)(void);
int (*alloc_counter)(struct cpu_hw_events *cpuc,
struct hw_perf_event *hwc);
u64 (*read_counter)(unsigned int idx);
void (*write_counter)(unsigned int idx, u64 val);
void (*enable_event)(struct hw_perf_event *evt, int idx);
void (*disable_event)(int idx);
const struct mips_perf_event *(*map_raw_event)(u64 config);
const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
const struct mips_perf_event (*cache_event_map)
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
unsigned int num_counters;
};
static const struct mips_pmu *mipspmu;
static int
mipspmu_event_set_period(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int ret = 0;
u64 uleft;
unsigned long flags;
if (unlikely(left <= -period)) {
left = period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (unlikely(left <= 0)) {
left += period;
local64_set(&hwc->period_left, left);
hwc->last_period = period;
ret = 1;
}
if (left > (s64)MAX_PERIOD)
left = MAX_PERIOD;
local64_set(&hwc->prev_count, (u64)-left);
local_irq_save(flags);
uleft = (u64)(-left) & MAX_PERIOD;
uleft > VALID_COUNT ?
set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
local_irq_restore(flags);
perf_event_update_userpage(event);
return ret;
}
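/*
 * Illustrative sketch, not part of this file: how the start value
 * computed in mipspmu_event_set_period() is split between the hardware
 * register and the borrowed software MSB in cpuc->msbs. The helper
 * name is hypothetical.
 */
static inline void split_counter_start(u64 start, u32 *hw_bits, int *sw_msb)
{
start &= MAX_PERIOD; /* keep the 32 counter bits */
*sw_msb = start > VALID_COUNT; /* bit 31 lives in cpuc->msbs */
*hw_bits = start & VALID_COUNT; /* low 31 bits go to hardware */
}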
static void mipspmu_event_update(struct perf_event *event,
struct hw_perf_event *hwc,
int idx)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
unsigned long flags;
int shift = 64 - TOTAL_BITS;
s64 prev_raw_count, new_raw_count;
u64 delta;
again:
prev_raw_count = local64_read(&hwc->prev_count);
local_irq_save(flags);
/* Reconstruct the "real" counter value (fold in the software MSB). */
new_raw_count = mipspmu->read_counter(idx);
if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
new_raw_count &= VALID_COUNT;
clear_bit(idx, cpuc->msbs);
} else
new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
local_irq_restore(flags);
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
new_raw_count) != prev_raw_count)
goto again;
delta = (new_raw_count << shift) - (prev_raw_count << shift);
delta >>= shift;
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
return;
}
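/*
 * A hedged worked example of the shift trick above: with TOTAL_BITS
 * == 32 the subtraction is effectively done modulo 2^32, so a counter
 * that wraps from prev == 0xfffffff0 to new == 0x00000010 yields
 * delta == 0x20, i.e. the 32 events that actually occurred.
 */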
static void mipspmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (flags & PERF_EF_RELOAD)
WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
hwc->state = 0;
/* Set the period for the event. */
mipspmu_event_set_period(event, hwc, hwc->idx);
/* Enable the event. */
mipspmu->enable_event(hwc, hwc->idx);
}
static void mipspmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
if (!mipspmu)
return;
if (!(hwc->state & PERF_HES_STOPPED)) {
/* We are working on a local event. */
mipspmu->disable_event(hwc->idx);
barrier();
mipspmu_event_update(event, hwc, hwc->idx);
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}
}
static int mipspmu_add(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
perf_pmu_disable(event->pmu);
/* Look for a free counter for this event. */
idx = mipspmu->alloc_counter(cpuc, hwc);
if (idx < 0) {
err = idx;
goto out;
}
/*
* If there is an event in the counter we are going to use then
* make sure it is disabled.
*/
event->hw.idx = idx;
mipspmu->disable_event(idx);
cpuc->events[idx] = event;
hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
if (flags & PERF_EF_START)
mipspmu_start(event, PERF_EF_RELOAD);
/* Propagate our changes to the userspace mapping. */
perf_event_update_userpage(event);
out:
perf_pmu_enable(event->pmu);
return err;
}
static void mipspmu_del(struct perf_event *event, int flags)
{
struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
mipspmu_stop(event, PERF_EF_UPDATE);
cpuc->events[idx] = NULL;
clear_bit(idx, cpuc->used_mask);
perf_event_update_userpage(event);
}
static void mipspmu_read(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
/* Don't read disabled counters! */
if (hwc->idx < 0)
return;
mipspmu_event_update(event, hwc, hwc->idx);
}
static void mipspmu_enable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->start();
}
static void mipspmu_disable(struct pmu *pmu)
{
if (mipspmu)
mipspmu->stop();
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);
static int mipspmu_get_irq(void)
{
int err;
if (mipspmu->irq >= 0) {
/* Request my own irq handler. */
err = request_irq(mipspmu->irq, mipspmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"mips_perf_pmu", NULL);
if (err) {
pr_warning("Unable to request IRQ%d for MIPS "
"performance counters!\n", mipspmu->irq);
}
} else if (cp0_perfcount_irq < 0) {
/*
* We are sharing the irq number with the timer interrupt.
*/
save_perf_irq = perf_irq;
perf_irq = mipspmu->handle_shared_irq;
err = 0;
} else {
pr_warning("The platform hasn't properly defined its "
"interrupt controller.\n");
err = -ENOENT;
}
return err;
}
static void mipspmu_free_irq(void)
{
if (mipspmu->irq >= 0)
free_irq(mipspmu->irq, NULL);
else if (cp0_perfcount_irq < 0)
perf_irq = save_perf_irq;
}
/*
* mipsxx/rm9000/loongson2 have different performance counters, so each
* has its own specific low-level init routine.
*/
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);
static void hw_perf_event_destroy(struct perf_event *event)
{
if (atomic_dec_and_mutex_lock(&active_events,
&pmu_reserve_mutex)) {
/*
* We must not call the destroy function with interrupts
* disabled.
*/
on_each_cpu(reset_counters,
(void *)(long)mipspmu->num_counters, 1);
mipspmu_free_irq();
mutex_unlock(&pmu_reserve_mutex);
}
}
static int mipspmu_event_init(struct perf_event *event)
{
int err = 0;
switch (event->attr.type) {
case PERF_TYPE_RAW:
case PERF_TYPE_HARDWARE:
case PERF_TYPE_HW_CACHE:
break;
default:
return -ENOENT;
}
if (!mipspmu || event->cpu >= nr_cpumask_bits ||
(event->cpu >= 0 && !cpu_online(event->cpu)))
return -ENODEV;
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
return -ENOSPC;
}
mutex_lock(&pmu_reserve_mutex);
if (atomic_read(&active_events) == 0)
err = mipspmu_get_irq();
if (!err)
atomic_inc(&active_events);
mutex_unlock(&pmu_reserve_mutex);
}
if (err)
return err;
err = __hw_perf_event_init(event);
if (err)
hw_perf_event_destroy(event);
return err;
}
static struct pmu pmu = {
.pmu_enable = mipspmu_enable,
.pmu_disable = mipspmu_disable,
.event_init = mipspmu_event_init,
.add = mipspmu_add,
.del = mipspmu_del,
.start = mipspmu_start,
.stop = mipspmu_stop,
.read = mipspmu_read,
};
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
* Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
* event_id.
*/
#ifdef CONFIG_MIPS_MT_SMP
return ((unsigned int)pev->range << 24) |
(pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#else
return (pev->cntr_mask & 0xffff00) |
(pev->event_id & 0xff);
#endif
}
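/*
 * Hedged illustration only: a decoder mirroring the layout documented
 * above (top 8 bits range, middle 16 bits cntr_mask, low 8 bits
 * event_id). This helper does not exist in the kernel.
 */
static inline void
mipspmu_perf_event_decode(unsigned int code, unsigned int *range,
unsigned int *cntr_mask, unsigned int *event_id)
{
*range = code >> 24;
*cntr_mask = code & 0xffff00;
*event_id = code & 0xff;
}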
static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
const struct mips_perf_event *pev;
pev = ((*mipspmu->general_event_map)[idx].event_id ==
UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
&(*mipspmu->general_event_map)[idx]);
return pev;
}
static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
unsigned int cache_type, cache_op, cache_result;
const struct mips_perf_event *pev;
cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return ERR_PTR(-EINVAL);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return ERR_PTR(-EINVAL);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return ERR_PTR(-EINVAL);
pev = &((*mipspmu->cache_event_map)
[cache_type]
[cache_op]
[cache_result]);
if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
return ERR_PTR(-EOPNOTSUPP);
return pev;
}
static int validate_event(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
struct hw_perf_event fake_hwc = event->hw;
/* Allow mixed event groups, so return 1 to pass validation. */
if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}
static int validate_group(struct perf_event *event)
{
struct perf_event *sibling, *leader = event->group_leader;
struct cpu_hw_events fake_cpuc;
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
return -ENOSPC;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
return -ENOSPC;
}
if (!validate_event(&fake_cpuc, event))
return -ENOSPC;
return 0;
}
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event *event = cpuc->events[idx];
struct hw_perf_event *hwc = &event->hw;
mipspmu_event_update(event, hwc, idx);
data->period = event->hw.last_period;
if (!mipspmu_event_set_period(event, hwc, idx))
return;
if (perf_event_overflow(event, data, regs))
mipspmu->disable_event(idx);
}
#include "perf_event_mipsxx.c"
/* Callchain handling code. */
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we will add it here.
*/
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
}
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
while (!kstack_end(sp)) {
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
}
}
}
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
do {
perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_7 |
crossvul-cpp_data_good_5545_0 | /*
* Back-end of the driver for virtual network devices. This portion of the
* driver exports a 'unified' network-device interface that can be accessed
* by any operating system that implements a compatible front end. A
* reference front-end implementation can be found in:
* drivers/net/xen-netfront.c
*
* Copyright (c) 2002-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "common.h"
#include <linux/kthread.h>
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <net/tcp.h>
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
struct pending_tx_info {
struct xen_netif_tx_request req;
struct xenvif *vif;
};
typedef unsigned int pending_ring_idx_t;
struct netbk_rx_meta {
int id;
int size;
int gso_size;
};
#define MAX_PENDING_REQS 256
/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF
#define MAX_BUFFER_OFFSET PAGE_SIZE
/* extra field used in struct page */
union page_ext {
struct {
#if BITS_PER_LONG < 64
#define IDX_WIDTH 8
#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
unsigned int group:GROUP_WIDTH;
unsigned int idx:IDX_WIDTH;
#else
unsigned int group, idx;
#endif
} e;
void *mapping;
};
struct xen_netbk {
wait_queue_head_t wq;
struct task_struct *task;
struct sk_buff_head rx_queue;
struct sk_buff_head tx_queue;
struct timer_list net_timer;
struct page *mmap_pages[MAX_PENDING_REQS];
pending_ring_idx_t pending_prod;
pending_ring_idx_t pending_cons;
struct list_head net_schedule_list;
/* Protect the net_schedule_list in netif. */
spinlock_t net_schedule_list_lock;
atomic_t netfront_count;
struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
u16 pending_ring[MAX_PENDING_REQS];
/*
* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
* head/fragment page uses 2 copy operations because it
* straddles two buffers in the frontend.
*/
struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
};
static struct xen_netbk *xen_netbk;
static int xen_netbk_group_nr;
void xen_netbk_add_xenvif(struct xenvif *vif)
{
int i;
int min_netfront_count;
int min_group = 0;
struct xen_netbk *netbk;
min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
for (i = 0; i < xen_netbk_group_nr; i++) {
int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
if (netfront_count < min_netfront_count) {
min_group = i;
min_netfront_count = netfront_count;
}
}
netbk = &xen_netbk[min_group];
vif->netbk = netbk;
atomic_inc(&netbk->netfront_count);
}
void xen_netbk_remove_xenvif(struct xenvif *vif)
{
struct xen_netbk *netbk = vif->netbk;
vif->netbk = NULL;
atomic_dec(&netbk->netfront_count);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags);
static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
u16 idx)
{
return page_to_pfn(netbk->mmap_pages[idx]);
}
static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
u16 idx)
{
return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
}
/* extra field used in struct page */
static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
unsigned int idx)
{
unsigned int group = netbk - xen_netbk;
union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
pg->mapping = ext.mapping;
}
static int get_page_ext(struct page *pg,
unsigned int *pgroup, unsigned int *pidx)
{
union page_ext ext = { .mapping = pg->mapping };
struct xen_netbk *netbk;
unsigned int group, idx;
group = ext.e.group - 1;
if (group < 0 || group >= xen_netbk_group_nr)
return 0;
netbk = &xen_netbk[group];
idx = ext.e.idx;
if ((idx < 0) || (idx >= MAX_PENDING_REQS))
return 0;
if (netbk->mmap_pages[idx] != pg)
return 0;
*pgroup = group;
*pidx = idx;
return 1;
}
/*
* This is the amount of each packet that we copy rather than map, so that the
* guest can't fiddle with the contents of the headers while we do
* packet processing on them (netfilter, routing, etc).
*/
#define PKT_PROT_LEN (ETH_HLEN + \
VLAN_HLEN + \
sizeof(struct iphdr) + MAX_IPOPTLEN + \
sizeof(struct tcphdr) + MAX_TCP_OPTION_SPACE)
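/*
 * A hedged worked example: with the usual kernel values (ETH_HLEN 14,
 * VLAN_HLEN 4, sizeof(struct iphdr) 20, MAX_IPOPTLEN 40,
 * sizeof(struct tcphdr) 20, MAX_TCP_OPTION_SPACE 40) PKT_PROT_LEN
 * comes to 138 bytes of copied header.
 */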
static u16 frag_get_pending_idx(skb_frag_t *frag)
{
return (u16)frag->page_offset;
}
static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
{
frag->page_offset = pending_idx;
}
static inline pending_ring_idx_t pending_index(unsigned i)
{
return i & (MAX_PENDING_REQS-1);
}
static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
{
return MAX_PENDING_REQS -
netbk->pending_prod + netbk->pending_cons;
}
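/*
 * Illustrative only: the bookkeeping above assumes MAX_PENDING_REQS is
 * a power of two, so pending_index() can mask instead of dividing.
 * pending_prod starts at MAX_PENDING_REQS and advances on release, so
 * with, say, pending_prod == 260 and pending_cons == 10 there are
 * 256 - 260 + 10 = 6 requests in flight. A compile-time check of the
 * power-of-two assumption (hypothetical helper):
 */
static inline void pending_ring_sanity(void)
{
BUILD_BUG_ON(MAX_PENDING_REQS & (MAX_PENDING_REQS - 1));
}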
static void xen_netbk_kick_thread(struct xen_netbk *netbk)
{
wake_up(&netbk->wq);
}
static int max_required_rx_slots(struct xenvif *vif)
{
int max = DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
if (vif->can_sg || vif->gso || vif->gso_prefix)
max += MAX_SKB_FRAGS + 1; /* extra_info + frags */
return max;
}
int xen_netbk_rx_ring_full(struct xenvif *vif)
{
RING_IDX peek = vif->rx_req_cons_peek;
RING_IDX needed = max_required_rx_slots(vif);
return ((vif->rx.sring->req_prod - peek) < needed) ||
((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
int xen_netbk_must_stop_queue(struct xenvif *vif)
{
if (!xen_netbk_rx_ring_full(vif))
return 0;
vif->rx.sring->req_event = vif->rx_req_cons_peek +
max_required_rx_slots(vif);
mb(); /* request notification /then/ check the queue */
return xen_netbk_rx_ring_full(vif);
}
/*
* Returns true if we should start a new receive buffer instead of
* adding 'size' bytes to a buffer which currently contains 'offset'
* bytes.
*/
static bool start_new_rx_buffer(int offset, unsigned long size, int head)
{
/* simple case: we have completely filled the current buffer. */
if (offset == MAX_BUFFER_OFFSET)
return true;
/*
* complex case: start a fresh buffer if the current frag
* would overflow the current buffer but only if:
* (i) this frag would fit completely in the next buffer
* and (ii) there is already some data in the current buffer
* and (iii) this is not the head buffer.
*
* Where:
* - (i) stops us splitting a frag into two copies
* unless the frag is too large for a single buffer.
* - (ii) stops us from leaving a buffer pointlessly empty.
* - (iii) stops us leaving the first buffer
* empty. Strictly speaking this is already covered
* by (ii) but is explicitly checked because
* netfront relies on the first buffer being
* non-empty and can crash otherwise.
*
* This means we will effectively linearise small
* frags but do not needlessly split large buffers
* into multiple copies, tending to give large frags
* their own buffers as before.
*/
if ((offset + size > MAX_BUFFER_OFFSET) &&
(size <= MAX_BUFFER_OFFSET) && offset && !head)
return true;
return false;
}
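/*
 * A minimal sketch of the decision above, assuming MAX_BUFFER_OFFSET
 * == PAGE_SIZE == 4096. Not part of the driver; purely illustrative.
 */
static void __maybe_unused start_new_rx_buffer_examples(void)
{
/* (a) current buffer completely full: always start a new one */
BUG_ON(!start_new_rx_buffer(4096, 100, 0));
/* (b) frag overflows, fits a fresh buffer, buffer non-empty,
 * not the head: start a new buffer */
BUG_ON(!start_new_rx_buffer(3000, 2000, 0));
/* (c) same frag but in the head buffer: keep filling, the head
 * must not be left empty */
BUG_ON(start_new_rx_buffer(3000, 2000, 1));
/* (d) frag larger than a whole buffer: split it instead */
BUG_ON(start_new_rx_buffer(3000, 5000, 0));
}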
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
* netbk_gop_frag_copy.
*/
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
unsigned int count;
int i, copy_off;
count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
copy_off = skb_headlen(skb) % PAGE_SIZE;
if (skb_shinfo(skb)->gso_size)
count++;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
unsigned long offset = skb_shinfo(skb)->frags[i].page_offset;
unsigned long bytes;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(copy_off > MAX_BUFFER_OFFSET);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(copy_off, bytes, 0)) {
count++;
copy_off = 0;
}
if (copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - copy_off;
copy_off += bytes;
offset += bytes;
size -= bytes;
if (offset == PAGE_SIZE)
offset = 0;
}
}
return count;
}
struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
struct gnttab_copy *copy;
struct netbk_rx_meta *meta;
int copy_off;
grant_ref_t copy_gref;
};
static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
struct netrx_pending_operations *npo)
{
struct netbk_rx_meta *meta;
struct xen_netif_rx_request *req;
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
return meta;
}
/*
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct netrx_pending_operations *npo,
struct page *page, unsigned long size,
unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
* These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
int foreign = get_page_ext(page, &group, &idx);
unsigned long bytes;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
meta = npo->meta + npo->meta_prod - 1;
/* Skip unused frames from start of page */
page += offset >> PAGE_SHIFT;
offset &= ~PAGE_MASK;
while (size > 0) {
BUG_ON(offset >= PAGE_SIZE);
BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
bytes = PAGE_SIZE - offset;
if (bytes > size)
bytes = size;
if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
/*
* Netfront requires there to be some data in the head
* buffer.
*/
BUG_ON(*head);
meta = get_next_rx_buffer(vif, npo);
}
if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
bytes = MAX_BUFFER_OFFSET - npo->copy_off;
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
if (foreign) {
struct xen_netbk *netbk = &xen_netbk[group];
struct pending_tx_info *src_pend;
src_pend = &netbk->pending_tx_info[idx];
copy_gop->source.domid = src_pend->vif->domid;
copy_gop->source.u.ref = src_pend->req.gref;
copy_gop->flags |= GNTCOPY_source_gref;
} else {
void *vaddr = page_address(page);
copy_gop->source.domid = DOMID_SELF;
copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
}
copy_gop->source.offset = offset;
copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
copy_gop->len = bytes;
npo->copy_off += bytes;
meta->size += bytes;
offset += bytes;
size -= bytes;
/* Next frame */
if (offset == PAGE_SIZE && size) {
BUG_ON(!PageCompound(page));
page++;
offset = 0;
}
/* Leave a gap for the GSO descriptor. */
if (*head && skb_shinfo(skb)->gso_size && !vif->gso_prefix)
vif->rx.req_cons++;
*head = 0; /* There must be something in this buffer now. */
}
}
/*
* Prepare an SKB to be transmitted to the frontend.
*
* This function is responsible for allocating grant operations, meta
* structures, etc.
*
* It returns the number of meta structures consumed. The number of
* ring slots used is always equal to the number of meta slots used
* plus the number of GSO descriptors used. Currently, we use either
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
* frontend-side LRO).
*/
static int netbk_gop_skb(struct sk_buff *skb,
struct netrx_pending_operations *npo)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
struct netbk_rx_meta *meta;
unsigned char *data;
int head = 1;
int old_meta_prod;
old_meta_prod = npo->meta_prod;
/* Set up a GSO prefix descriptor, if necessary */
if (skb_shinfo(skb)->gso_size && vif->gso_prefix) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
if (!vif->gso_prefix)
meta->gso_size = skb_shinfo(skb)->gso_size;
else
meta->gso_size = 0;
meta->size = 0;
meta->id = req->id;
npo->copy_off = 0;
npo->copy_gref = req->gref;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
unsigned int offset = offset_in_page(data);
unsigned int len = PAGE_SIZE - offset;
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
netbk_gop_frag_copy(vif, skb, npo,
virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
netbk_gop_frag_copy(vif, skb, npo,
skb_frag_page(&skb_shinfo(skb)->frags[i]),
skb_frag_size(&skb_shinfo(skb)->frags[i]),
skb_shinfo(skb)->frags[i].page_offset,
&head);
}
return npo->meta_prod - old_meta_prod;
}
/*
* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
* used to set up the operations on the top of
* netrx_pending_operations, which have since been done. Check that
* they didn't give any errors and advance over them.
*/
static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
struct netrx_pending_operations *npo)
{
struct gnttab_copy *copy_op;
int status = XEN_NETIF_RSP_OKAY;
int i;
for (i = 0; i < nr_meta_slots; i++) {
copy_op = npo->copy + npo->copy_cons++;
if (copy_op->status != GNTST_okay) {
netdev_dbg(vif->dev,
"Bad status %d from copy to DOM%d.\n",
copy_op->status, vif->domid);
status = XEN_NETIF_RSP_ERROR;
}
}
return status;
}
static void netbk_add_frag_responses(struct xenvif *vif, int status,
struct netbk_rx_meta *meta,
int nr_meta_slots)
{
int i;
unsigned long offset;
/* No fragments used */
if (nr_meta_slots <= 1)
return;
nr_meta_slots--;
for (i = 0; i < nr_meta_slots; i++) {
int flags;
if (i == nr_meta_slots - 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
offset = 0;
make_rx_response(vif, meta[i].id, status, offset,
meta[i].size, flags);
}
}
struct skb_cb_overlay {
int meta_slots_used;
};
static void xen_netbk_rx_action(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL, *tmp;
s8 status;
u16 irq, flags;
struct xen_netif_rx_response *resp;
struct sk_buff_head rxq;
struct sk_buff *skb;
LIST_HEAD(notify);
int ret;
int nr_frags;
int count;
unsigned long offset;
struct skb_cb_overlay *sco;
struct netrx_pending_operations npo = {
.copy = netbk->grant_copy_op,
.meta = netbk->meta,
};
skb_queue_head_init(&rxq);
count = 0;
while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
vif = netdev_priv(skb->dev);
nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = netbk_gop_skb(skb, &npo);
count += nr_frags + 1;
__skb_queue_tail(&rxq, skb);
/* Filled the batch queue? */
if (count + MAX_SKB_FRAGS >= XEN_NETIF_RX_RING_SIZE)
break;
}
BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
if (!npo.copy_prod)
return;
BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
vif = netdev_priv(skb->dev);
if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
resp = RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
resp->offset = netbk->meta[npo.meta_cons].gso_size;
resp->id = netbk->meta[npo.meta_cons].id;
resp->status = sco->meta_slots_used;
npo.meta_cons++;
sco->meta_slots_used--;
}
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
if (sco->meta_slots_used == 1)
flags = 0;
else
flags = XEN_NETRXF_more_data;
if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */
flags |= XEN_NETRXF_data_validated;
offset = 0;
resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
status, offset,
netbk->meta[npo.meta_cons].size,
flags);
if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
vif->rx.rsp_prod_pvt++);
resp->flags |= XEN_NETRXF_extra_info;
gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
gso->flags = 0;
}
netbk_add_frag_responses(vif, status,
netbk->meta + npo.meta_cons + 1,
sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
irq = vif->irq;
if (ret && list_empty(&vif->notify_list))
list_add_tail(&vif->notify_list, ¬ify);
xenvif_notify_tx_completion(vif);
xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
list_for_each_entry_safe(vif, tmp, ¬ify, notify_list) {
notify_remote_via_irq(vif->irq);
list_del_init(&vif->notify_list);
}
/* More work to do? */
if (!skb_queue_empty(&netbk->rx_queue) &&
!timer_pending(&netbk->net_timer))
xen_netbk_kick_thread(netbk);
}
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
struct xen_netbk *netbk = vif->netbk;
skb_queue_tail(&netbk->rx_queue, skb);
xen_netbk_kick_thread(netbk);
}
static void xen_netbk_alarm(unsigned long data)
{
struct xen_netbk *netbk = (struct xen_netbk *)data;
xen_netbk_kick_thread(netbk);
}
static int __on_net_schedule_list(struct xenvif *vif)
{
return !list_empty(&vif->schedule_list);
}
/* Must be called with net_schedule_list_lock held */
static void remove_from_net_schedule_list(struct xenvif *vif)
{
if (likely(__on_net_schedule_list(vif))) {
list_del_init(&vif->schedule_list);
xenvif_put(vif);
}
}
static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
{
struct xenvif *vif = NULL;
spin_lock_irq(&netbk->net_schedule_list_lock);
if (list_empty(&netbk->net_schedule_list))
goto out;
vif = list_first_entry(&netbk->net_schedule_list,
struct xenvif, schedule_list);
if (!vif)
goto out;
xenvif_get(vif);
remove_from_net_schedule_list(vif);
out:
spin_unlock_irq(&netbk->net_schedule_list_lock);
return vif;
}
void xen_netbk_schedule_xenvif(struct xenvif *vif)
{
unsigned long flags;
struct xen_netbk *netbk = vif->netbk;
if (__on_net_schedule_list(vif))
goto kick;
spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
if (!__on_net_schedule_list(vif) &&
likely(xenvif_schedulable(vif))) {
list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
xenvif_get(vif);
}
spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
kick:
smp_mb();
if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
!list_empty(&netbk->net_schedule_list))
xen_netbk_kick_thread(netbk);
}
void xen_netbk_deschedule_xenvif(struct xenvif *vif)
{
struct xen_netbk *netbk = vif->netbk;
spin_lock_irq(&netbk->net_schedule_list_lock);
remove_from_net_schedule_list(vif);
spin_unlock_irq(&netbk->net_schedule_list_lock);
}
void xen_netbk_check_rx_xenvif(struct xenvif *vif)
{
int more_to_do;
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
if (more_to_do)
xen_netbk_schedule_xenvif(vif);
}
static void tx_add_credit(struct xenvif *vif)
{
unsigned long max_burst, max_credit;
/*
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
max_burst = min(max_burst, 131072UL);
max_burst = max(max_burst, vif->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = vif->remaining_credit + vif->credit_bytes;
if (max_credit < vif->remaining_credit)
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
vif->remaining_credit = min(max_credit, max_burst);
}
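/*
 * The wrap test above is the usual overflow-safe unsigned add: if
 * a + b overflows, the sum is smaller than either operand. A hedged
 * standalone sketch of the same pattern (hypothetical helper):
 */
static inline unsigned long saturating_add_ul(unsigned long a,
unsigned long b)
{
unsigned long sum = a + b;
return sum < a ? ULONG_MAX : sum;
}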
static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
xen_netbk_check_rx_xenvif(vif);
}
static void netbk_tx_err(struct xenvif *vif,
struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
do {
make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
if (cons >= end)
break;
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
vif->tx.req_cons = cons;
xen_netbk_check_rx_xenvif(vif);
xenvif_put(vif);
}
static void netbk_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
xenvif_put(vif);
}
static int netbk_count_requests(struct xenvif *vif,
struct xen_netif_tx_request *first,
struct xen_netif_tx_request *txp,
int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
int frags = 0;
if (!(first->flags & XEN_NETTXF_more_data))
return 0;
do {
if (frags >= work_to_do) {
netdev_err(vif->dev, "Need more frags\n");
netbk_fatal_tx_err(vif);
return -frags;
}
if (unlikely(frags >= MAX_SKB_FRAGS)) {
netdev_err(vif->dev, "Too many frags\n");
netbk_fatal_tx_err(vif);
return -frags;
}
memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + frags),
sizeof(*txp));
if (txp->size > first->size) {
netdev_err(vif->dev, "Frag is bigger than frame.\n");
netbk_fatal_tx_err(vif);
return -frags;
}
first->size -= txp->size;
frags++;
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
netdev_err(vif->dev, "txp->offset: %x, size: %u\n",
txp->offset, txp->size);
netbk_fatal_tx_err(vif);
return -frags;
}
} while ((txp++)->flags & XEN_NETTXF_more_data);
return frags;
}
static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
struct sk_buff *skb,
u16 pending_idx)
{
struct page *page;
page = alloc_page(GFP_KERNEL|__GFP_COLD);
if (!page)
return NULL;
set_page_ext(page, netbk, pending_idx);
netbk->mmap_pages[pending_idx] = page;
return page;
}
static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_tx_request *txp,
struct gnttab_copy *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
u16 pending_idx = *((u16 *)skb->data);
int i, start;
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < shinfo->nr_frags; i++, txp++) {
struct page *page;
pending_ring_idx_t index;
struct pending_tx_info *pending_tx_info =
netbk->pending_tx_info;
index = pending_index(netbk->pending_cons++);
pending_idx = netbk->pending_ring[index];
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page)
return NULL;
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txp->offset;
gop->len = txp->size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
xenvif_get(vif);
pending_tx_info[pending_idx].vif = vif;
frag_set_pending_idx(&frags[i], pending_idx);
}
return gop;
}
static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
struct sk_buff *skb,
struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i, err, start;
/* Check status of header. */
err = gop->status;
if (unlikely(err))
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
for (i = start; i < nr_frags; i++) {
int j, newerr;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
/* Check error status: if okay then remember grant handle. */
newerr = (++gop)->status;
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
continue;
}
/* Error on this fragment: respond to client with an error. */
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
continue;
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
/* Remember the error: invalidate all subsequent fragments. */
err = newerr;
}
*gopp = gop + 1;
return err;
}
static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
int i;
for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = shinfo->frags + i;
struct xen_netif_tx_request *txp;
struct page *page;
u16 pending_idx;
pending_idx = frag_get_pending_idx(frag);
txp = &netbk->pending_tx_info[pending_idx].req;
page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
/* Take an extra reference to offset xen_netbk_idx_release */
get_page(netbk->mmap_pages[pending_idx]);
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
static int xen_netbk_get_extras(struct xenvif *vif,
struct xen_netif_extra_info *extras,
int work_to_do)
{
struct xen_netif_extra_info extra;
RING_IDX cons = vif->tx.req_cons;
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_err(vif->dev, "Missing extra info\n");
netbk_fatal_tx_err(vif);
return -EBADR;
}
memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
sizeof(extra));
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
vif->tx.req_cons = ++cons;
netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}
memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
vif->tx.req_cons = ++cons;
} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
return work_to_do;
}
static int netbk_set_skb_gso(struct xenvif *vif,
struct sk_buff *skb,
struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_err(vif->dev, "GSO size must not be zero.\n");
netbk_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
netbk_fatal_tx_err(vif);
return -EINVAL;
}
skb_shinfo(skb)->gso_size = gso->u.gso.size;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
/* Header must be checked, and gso_segs computed. */
skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
skb_shinfo(skb)->gso_segs = 0;
return 0;
}
static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
{
struct iphdr *iph;
unsigned char *th;
int err = -EPROTO;
int recalculate_partial_csum = 0;
/*
* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
* peers can fail to set NETRXF_csum_blank when sending a GSO
* frame. In this case force the SKB to CHECKSUM_PARTIAL and
* recalculate the partial checksum.
*/
if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
vif->rx_gso_checksum_fixup++;
skb->ip_summed = CHECKSUM_PARTIAL;
recalculate_partial_csum = 1;
}
/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
if (skb->ip_summed != CHECKSUM_PARTIAL)
return 0;
if (skb->protocol != htons(ETH_P_IP))
goto out;
iph = (void *)skb->data;
th = skb->data + 4 * iph->ihl;
if (th >= skb_tail_pointer(skb))
goto out;
skb->csum_start = th - skb->head;
switch (iph->protocol) {
case IPPROTO_TCP:
skb->csum_offset = offsetof(struct tcphdr, check);
if (recalculate_partial_csum) {
struct tcphdr *tcph = (struct tcphdr *)th;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_TCP, 0);
}
break;
case IPPROTO_UDP:
skb->csum_offset = offsetof(struct udphdr, check);
if (recalculate_partial_csum) {
struct udphdr *udph = (struct udphdr *)th;
udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
skb->len - iph->ihl*4,
IPPROTO_UDP, 0);
}
break;
default:
if (net_ratelimit())
netdev_err(vif->dev,
"Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
iph->protocol);
goto out;
}
if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
goto out;
err = 0;
out:
return err;
}
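/*
 * A hedged aside on the CHECKSUM_PARTIAL contract relied on above:
 * skb->csum_start marks where checksumming begins (the transport
 * header) and skb->csum_offset says where the 16-bit result is stored
 * within it, e.g. offsetof(struct tcphdr, check) == 16 for TCP and
 * offsetof(struct udphdr, check) == 6 for UDP.
 */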
static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
{
unsigned long now = jiffies;
unsigned long next_credit =
vif->credit_timeout.expires +
msecs_to_jiffies(vif->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
if (timer_pending(&vif->credit_timeout))
return true;
/* Passed the point where we can replenish credit? */
if (time_after_eq(now, next_credit)) {
vif->credit_timeout.expires = now;
tx_add_credit(vif);
}
/* Still too big to send right now? Set a callback. */
if (size > vif->remaining_credit) {
vif->credit_timeout.data =
(unsigned long)vif;
vif->credit_timeout.function =
tx_credit_callback;
mod_timer(&vif->credit_timeout,
next_credit);
return true;
}
return false;
}
static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list)) {
struct xenvif *vif;
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[MAX_SKB_FRAGS];
struct page *page;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
u16 pending_idx;
RING_IDX idx;
int work_to_do;
unsigned int data_len;
pending_ring_idx_t index;
/* Get a netif from the list with work to do. */
vif = poll_net_schedule_list(netbk);
/* This can sometimes happen because the test of
* list_empty(net_schedule_list) at the top of the
* loop is unlocked. Just go back and have another
* look.
*/
if (!vif)
continue;
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
"Impossible number of requests. "
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
netbk_fatal_tx_err(vif);
continue;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
if (!work_to_do) {
xenvif_put(vif);
continue;
}
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
tx_credit_exceeded(vif, txreq.size)) {
xenvif_put(vif);
continue;
}
vif->remaining_credit -= txreq.size;
work_to_do--;
vif->tx.req_cons = ++idx;
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
work_to_do = xen_netbk_get_extras(vif, extras,
work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
continue;
}
ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
continue;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
netbk_tx_err(vif, &txreq, idx);
continue;
}
/* The payload mustn't cross a page boundary, as it mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
netdev_err(vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
netbk_fatal_tx_err(vif);
continue;
}
index = pending_index(netbk->pending_cons);
pending_idx = netbk->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < MAX_SKB_FRAGS) ?
PKT_PROT_LEN : txreq.size;
skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
netbk_tx_err(vif, &txreq, idx);
break;
}
/* Packets passed to netif_rx() must have some headroom. */
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
if (netbk_set_skb_gso(vif, skb, gso)) {
/* Failure in netbk_set_skb_gso is fatal. */
kfree_skb(skb);
continue;
}
}
/* XXX could copy straight to head */
page = xen_netbk_alloc_page(netbk, skb, pending_idx);
if (!page) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
gop->dest.u.gmfn = virt_to_mfn(page_address(page));
gop->dest.domid = DOMID_SELF;
gop->dest.offset = txreq.offset;
gop->len = txreq.size;
gop->flags = GNTCOPY_source_gref;
gop++;
memcpy(&netbk->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
netbk->pending_tx_info[pending_idx].vif = vif;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
skb_shinfo(skb)->nr_frags = ret;
if (data_len < txreq.size) {
skb_shinfo(skb)->nr_frags++;
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
pending_idx);
} else {
frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
INVALID_PENDING_IDX);
}
netbk->pending_cons++;
request_gop = xen_netbk_get_requests(netbk, vif,
skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
netbk_tx_err(vif, &txreq, idx);
continue;
}
gop = request_gop;
__skb_queue_tail(&netbk->tx_queue, skb);
vif->tx.req_cons = idx;
xen_netbk_check_rx_xenvif(vif);
if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
break;
}
return gop - netbk->tx_copy_ops;
}
static void xen_netbk_tx_submit(struct xen_netbk *netbk)
{
struct gnttab_copy *gop = netbk->tx_copy_ops;
struct sk_buff *skb;
while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
struct xenvif *vif;
u16 pending_idx;
unsigned data_len;
pending_idx = *((u16 *)skb->data);
vif = netbk->pending_tx_info[pending_idx].vif;
txp = &netbk->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
netdev_dbg(vif->dev, "netback grant failed.\n");
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
continue;
}
data_len = skb->len;
memcpy(skb->data,
(void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
data_len);
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
txp->offset += data_len;
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
xen_netbk_fill_frags(netbk, skb);
/*
* If the initial fragment was < PKT_PROT_LEN then
* pull through some bytes from the other fragments to
* increase the linear region to PKT_PROT_LEN bytes.
*/
if (skb_headlen(skb) < PKT_PROT_LEN && skb_is_nonlinear(skb)) {
int target = min_t(int, skb->len, PKT_PROT_LEN);
__pskb_pull_tail(skb, target - skb_headlen(skb));
}
skb->dev = vif->dev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (checksum_setup(vif, skb)) {
netdev_dbg(vif->dev,
"Can't setup checksum in net_tx_action\n");
kfree_skb(skb);
continue;
}
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
xenvif_receive_skb(vif, skb);
}
}
/* Called after netfront has transmitted */
static void xen_netbk_tx_action(struct xen_netbk *netbk)
{
unsigned nr_gops;
nr_gops = xen_netbk_tx_build_gops(netbk);
if (nr_gops == 0)
return;
gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
xen_netbk_tx_submit(netbk);
}
static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
u8 status)
{
struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t index;
/* Already complete? */
if (netbk->mmap_pages[pending_idx] == NULL)
return;
pending_tx_info = &netbk->pending_tx_info[pending_idx];
vif = pending_tx_info->vif;
make_tx_response(vif, &pending_tx_info->req, status);
index = pending_index(netbk->pending_prod++);
netbk->pending_ring[index] = pending_idx;
xenvif_put(vif);
netbk->mmap_pages[pending_idx]->mapping = 0;
put_page(netbk->mmap_pages[pending_idx]);
netbk->mmap_pages[pending_idx] = NULL;
}
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st)
{
RING_IDX i = vif->tx.rsp_prod_pvt;
struct xen_netif_tx_response *resp;
int notify;
resp = RING_GET_RESPONSE(&vif->tx, i);
resp->id = txp->id;
resp->status = st;
if (txp->flags & XEN_NETTXF_extra_info)
RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
vif->tx.rsp_prod_pvt = ++i;
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
if (notify)
notify_remote_via_irq(vif->irq);
}
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
u16 offset,
u16 size,
u16 flags)
{
RING_IDX i = vif->rx.rsp_prod_pvt;
struct xen_netif_rx_response *resp;
resp = RING_GET_RESPONSE(&vif->rx, i);
resp->offset = offset;
resp->flags = flags;
resp->id = id;
resp->status = (s16)size;
if (st < 0)
resp->status = (s16)st;
vif->rx.rsp_prod_pvt = ++i;
return resp;
}
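/*
* Illustrative note (not in the original source): the response status
* field doubles as a byte count. A successfully copied 1500-byte
* fragment yields resp->status == 1500, while any negative st (e.g.
* XEN_NETIF_RSP_ERROR) overrides it so the frontend sees the error.
*/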
static inline int rx_work_todo(struct xen_netbk *netbk)
{
return !skb_queue_empty(&netbk->rx_queue);
}
static inline int tx_work_todo(struct xen_netbk *netbk)
{
if (((nr_pending_reqs(netbk) + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
!list_empty(&netbk->net_schedule_list))
return 1;
return 0;
}
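/*
* Illustrative note (not in the original source): the headroom test in
* tx_work_todo() is what keeps xen_netbk_tx_build_gops() safe. With
* MAX_PENDING_REQS slots in total, work is only claimed while at least
* MAX_SKB_FRAGS free slots remain, so a worst-case skb (one slot per
* possible frag) can always be accepted without exhausting the ring.
*/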
static int xen_netbk_kthread(void *data)
{
struct xen_netbk *netbk = data;
while (!kthread_should_stop()) {
wait_event_interruptible(netbk->wq,
rx_work_todo(netbk) ||
tx_work_todo(netbk) ||
kthread_should_stop());
cond_resched();
if (kthread_should_stop())
break;
if (rx_work_todo(netbk))
xen_netbk_rx_action(netbk);
if (tx_work_todo(netbk))
xen_netbk_tx_action(netbk);
}
return 0;
}
void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->tx.sring);
if (vif->rx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
vif->rx.sring);
}
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
int err = -ENOMEM;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
tx_ring_ref, &addr);
if (err)
goto err;
txs = (struct xen_netif_tx_sring *)addr;
BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
rx_ring_ref, &addr);
if (err)
goto err;
rxs = (struct xen_netif_rx_sring *)addr;
BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
vif->rx_req_cons_peek = 0;
return 0;
err:
xen_netbk_unmap_frontend_rings(vif);
return err;
}
static int __init netback_init(void)
{
int i;
int rc = 0;
int group;
if (!xen_domain())
return -ENODEV;
xen_netbk_group_nr = num_online_cpus();
xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
if (!xen_netbk)
return -ENOMEM;
for (group = 0; group < xen_netbk_group_nr; group++) {
struct xen_netbk *netbk = &xen_netbk[group];
skb_queue_head_init(&netbk->rx_queue);
skb_queue_head_init(&netbk->tx_queue);
init_timer(&netbk->net_timer);
netbk->net_timer.data = (unsigned long)netbk;
netbk->net_timer.function = xen_netbk_alarm;
netbk->pending_cons = 0;
netbk->pending_prod = MAX_PENDING_REQS;
for (i = 0; i < MAX_PENDING_REQS; i++)
netbk->pending_ring[i] = i;
init_waitqueue_head(&netbk->wq);
netbk->task = kthread_create(xen_netbk_kthread,
(void *)netbk,
"netback/%u", group);
if (IS_ERR(netbk->task)) {
printk(KERN_ALERT "kthread_create() failed at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
}
kthread_bind(netbk->task, group);
INIT_LIST_HEAD(&netbk->net_schedule_list);
spin_lock_init(&netbk->net_schedule_list_lock);
atomic_set(&netbk->netfront_count, 0);
wake_up_process(netbk->task);
}
rc = xenvif_xenbus_init();
if (rc)
goto failed_init;
return 0;
failed_init:
while (--group >= 0) {
struct xen_netbk *netbk = &xen_netbk[group];
for (i = 0; i < MAX_PENDING_REQS; i++) {
if (netbk->mmap_pages[i])
__free_page(netbk->mmap_pages[i]);
}
del_timer(&netbk->net_timer);
kthread_stop(netbk->task);
}
vfree(xen_netbk);
return rc;
}
module_init(netback_init);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vif");
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5545_0 |
crossvul-cpp_data_bad_1806_0 | /*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
*/
#include "fuse_i.h"
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/compat.h>
#include <linux/swap.h>
#include <linux/falloc.h>
#include <linux/uio.h>
static const struct file_operations fuse_direct_io_file_operations;
static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
int opcode, struct fuse_open_out *outargp)
{
struct fuse_open_in inarg;
FUSE_ARGS(args);
memset(&inarg, 0, sizeof(inarg));
inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY);
if (!fc->atomic_o_trunc)
inarg.flags &= ~O_TRUNC;
args.in.h.opcode = opcode;
args.in.h.nodeid = nodeid;
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
args.out.args[0].size = sizeof(*outargp);
args.out.args[0].value = outargp;
return fuse_simple_request(fc, &args);
}
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
struct fuse_file *ff;
ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
if (unlikely(!ff))
return NULL;
ff->fc = fc;
ff->reserved_req = fuse_request_alloc(0);
if (unlikely(!ff->reserved_req)) {
kfree(ff);
return NULL;
}
INIT_LIST_HEAD(&ff->write_entry);
atomic_set(&ff->count, 0);
RB_CLEAR_NODE(&ff->polled_node);
init_waitqueue_head(&ff->poll_wait);
spin_lock(&fc->lock);
ff->kh = ++fc->khctr;
spin_unlock(&fc->lock);
return ff;
}
void fuse_file_free(struct fuse_file *ff)
{
fuse_request_free(ff->reserved_req);
kfree(ff);
}
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
atomic_inc(&ff->count);
return ff;
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
iput(req->misc.release.inode);
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
if (ff->fc->no_open) {
/*
* Drop the release request when client does not
* implement 'open'
*/
__clear_bit(FR_BACKGROUND, &req->flags);
iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else if (sync) {
__clear_bit(FR_BACKGROUND, &req->flags);
fuse_request_send(ff->fc, req);
iput(req->misc.release.inode);
fuse_put_request(ff->fc, req);
} else {
req->end = fuse_release_end;
__set_bit(FR_BACKGROUND, &req->flags);
fuse_request_send_background(ff->fc, req);
}
kfree(ff);
}
}
int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file,
bool isdir)
{
struct fuse_file *ff;
int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN;
ff = fuse_file_alloc(fc);
if (!ff)
return -ENOMEM;
ff->fh = 0;
ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */
if (!fc->no_open || isdir) {
struct fuse_open_out outarg;
int err;
err = fuse_send_open(fc, nodeid, file, opcode, &outarg);
if (!err) {
ff->fh = outarg.fh;
ff->open_flags = outarg.open_flags;
} else if (err != -ENOSYS || isdir) {
fuse_file_free(ff);
return err;
} else {
fc->no_open = 1;
}
}
if (isdir)
ff->open_flags &= ~FOPEN_DIRECT_IO;
ff->nodeid = nodeid;
file->private_data = fuse_file_get(ff);
return 0;
}
EXPORT_SYMBOL_GPL(fuse_do_open);
static void fuse_link_write_file(struct file *file)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff = file->private_data;
/*
* file may be written through mmap, so chain it onto the
* inode's write_files list
*/
spin_lock(&fc->lock);
if (list_empty(&ff->write_entry))
list_add(&ff->write_entry, &fi->write_files);
spin_unlock(&fc->lock);
}
void fuse_finish_open(struct inode *inode, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = get_fuse_conn(inode);
if (ff->open_flags & FOPEN_DIRECT_IO)
file->f_op = &fuse_direct_io_file_operations;
if (!(ff->open_flags & FOPEN_KEEP_CACHE))
invalidate_inode_pages2(inode->i_mapping);
if (ff->open_flags & FOPEN_NONSEEKABLE)
nonseekable_open(inode, file);
if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
fi->attr_version = ++fc->attr_version;
i_size_write(inode, 0);
spin_unlock(&fc->lock);
fuse_invalidate_attr(inode);
if (fc->writeback_cache)
file_update_time(file);
}
if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache)
fuse_link_write_file(file);
}
int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
{
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
bool lock_inode = (file->f_flags & O_TRUNC) &&
fc->atomic_o_trunc &&
fc->writeback_cache;
err = generic_file_open(inode, file);
if (err)
return err;
if (lock_inode)
mutex_lock(&inode->i_mutex);
err = fuse_do_open(fc, get_node_id(inode), file, isdir);
if (!err)
fuse_finish_open(inode, file);
if (lock_inode)
mutex_unlock(&inode->i_mutex);
return err;
}
static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode)
{
struct fuse_conn *fc = ff->fc;
struct fuse_req *req = ff->reserved_req;
struct fuse_release_in *inarg = &req->misc.release.in;
spin_lock(&fc->lock);
list_del(&ff->write_entry);
if (!RB_EMPTY_NODE(&ff->polled_node))
rb_erase(&ff->polled_node, &fc->polled_files);
spin_unlock(&fc->lock);
wake_up_interruptible_all(&ff->poll_wait);
inarg->fh = ff->fh;
inarg->flags = flags;
req->in.h.opcode = opcode;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_release_in);
req->in.args[0].value = inarg;
}
void fuse_release_common(struct file *file, int opcode)
{
struct fuse_file *ff;
struct fuse_req *req;
ff = file->private_data;
if (unlikely(!ff))
return;
req = ff->reserved_req;
fuse_prepare_release(ff, file->f_flags, opcode);
if (ff->flock) {
struct fuse_release_in *inarg = &req->misc.release.in;
inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK;
inarg->lock_owner = fuse_lock_owner_id(ff->fc,
(fl_owner_t) file);
}
/* Hold inode until release is finished */
req->misc.release.inode = igrab(file_inode(file));
/*
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
*
* Make the release synchronous if this is a fuseblk mount:
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
{
return fuse_open_common(inode, file, false);
}
static int fuse_release(struct inode *inode, struct file *file)
{
struct fuse_conn *fc = get_fuse_conn(inode);
/* see fuse_vma_close() for !writeback_cache case */
if (fc->writeback_cache)
write_inode_now(inode, 1);
fuse_release_common(file, FUSE_RELEASE);
/* return value is ignored by VFS */
return 0;
}
void fuse_sync_release(struct fuse_file *ff, int flags)
{
WARN_ON(atomic_read(&ff->count) > 1);
fuse_prepare_release(ff, flags, FUSE_RELEASE);
__set_bit(FR_FORCE, &ff->reserved_req->flags);
__clear_bit(FR_BACKGROUND, &ff->reserved_req->flags);
fuse_request_send(ff->fc, ff->reserved_req);
fuse_put_request(ff->fc, ff->reserved_req);
kfree(ff);
}
EXPORT_SYMBOL_GPL(fuse_sync_release);
/*
* Scramble the ID space with XTEA, so that the value of the files_struct
* pointer is not exposed to userspace.
*/
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
u32 *k = fc->scramble_key;
u64 v = (unsigned long) id;
u32 v0 = v;
u32 v1 = v >> 32;
u32 sum = 0;
int i;
for (i = 0; i < 32; i++) {
v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
sum += 0x9E3779B9;
v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
}
return (u64) v0 + ((u64) v1 << 32);
}
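/*
* Illustrative sketch (not part of the original file): the loop above is
* a standard 32-round XTEA encipher keyed by fc->scramble_key with the
* usual delta 0x9E3779B9. Nothing in fuse ever needs to invert it, but a
* hypothetical decoder (the name and function below are ours, added for
* illustration only) would run the rounds backwards, starting from
* sum == 32 * delta:
*/
static inline u64 fuse_unscramble_id_sketch(struct fuse_conn *fc, u64 id)
{
	u32 *k = fc->scramble_key;
	u32 v0 = id;
	u32 v1 = id >> 32;
	u32 sum = 0x9E3779B9 * 32;	/* value of 'sum' after 32 forward rounds */
	int i;
	for (i = 0; i < 32; i++) {
		v1 -= ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
		sum -= 0x9E3779B9;
		v0 -= ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
	}
	return (u64) v0 + ((u64) v1 << 32);
}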
/*
* Check if any page in a range is under writeback
*
* This is currently done by walking the list of writepage requests
* for the inode, which can be pretty inefficient.
*/
static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from,
pgoff_t idx_to)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_req *req;
bool found = false;
spin_lock(&fc->lock);
list_for_each_entry(req, &fi->writepages, writepages_entry) {
pgoff_t curr_index;
BUG_ON(req->inode != inode);
curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
if (idx_from < curr_index + req->num_pages &&
curr_index <= idx_to) {
found = true;
break;
}
}
spin_unlock(&fc->lock);
return found;
}
static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
{
return fuse_range_is_writeback(inode, index, index);
}
/*
* Wait for page writeback to be completed.
*
* Since fuse doesn't rely on the VM writeback tracking, this has to
* use some other means.
*/
static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
{
struct fuse_inode *fi = get_fuse_inode(inode);
wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
return 0;
}
/*
* Wait for all pending writepages on the inode to finish.
*
* This is currently done by blocking further writes with FUSE_NOWRITE
* and waiting for all sent writes to complete.
*
* This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
* could conflict with truncation.
*/
static void fuse_sync_writes(struct inode *inode)
{
fuse_set_nowrite(inode);
fuse_release_nowrite(inode);
}
static int fuse_flush(struct file *file, fl_owner_t id)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
struct fuse_req *req;
struct fuse_flush_in inarg;
int err;
if (is_bad_inode(inode))
return -EIO;
if (fc->no_flush)
return 0;
err = write_inode_now(inode, 1);
if (err)
return err;
mutex_lock(&inode->i_mutex);
fuse_sync_writes(inode);
mutex_unlock(&inode->i_mutex);
req = fuse_get_req_nofail_nopages(fc, file);
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.lock_owner = fuse_lock_owner_id(fc, id);
req->in.h.opcode = FUSE_FLUSH;
req->in.h.nodeid = get_node_id(inode);
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
__set_bit(FR_FORCE, &req->flags);
fuse_request_send(fc, req);
err = req->out.h.error;
fuse_put_request(fc, req);
if (err == -ENOSYS) {
fc->no_flush = 1;
err = 0;
}
return err;
}
int fuse_fsync_common(struct file *file, loff_t start, loff_t end,
int datasync, int isdir)
{
struct inode *inode = file->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
FUSE_ARGS(args);
struct fuse_fsync_in inarg;
int err;
if (is_bad_inode(inode))
return -EIO;
mutex_lock(&inode->i_mutex);
/*
* Start writeback against all dirty pages of the inode, then
* wait for all outstanding writes, before sending the FSYNC
* request.
*/
err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
goto out;
fuse_sync_writes(inode);
err = sync_inode_metadata(inode, 1);
if (err)
goto out;
if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
goto out;
memset(&inarg, 0, sizeof(inarg));
inarg.fh = ff->fh;
inarg.fsync_flags = datasync ? 1 : 0;
args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
args.in.h.nodeid = get_node_id(inode);
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
if (isdir)
fc->no_fsyncdir = 1;
else
fc->no_fsync = 1;
err = 0;
}
out:
mutex_unlock(&inode->i_mutex);
return err;
}
static int fuse_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
return fuse_fsync_common(file, start, end, datasync, 0);
}
void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos,
size_t count, int opcode)
{
struct fuse_read_in *inarg = &req->misc.read.in;
struct fuse_file *ff = file->private_data;
inarg->fh = ff->fh;
inarg->offset = pos;
inarg->size = count;
inarg->flags = file->f_flags;
req->in.h.opcode = opcode;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(struct fuse_read_in);
req->in.args[0].value = inarg;
req->out.argvar = 1;
req->out.numargs = 1;
req->out.args[0].size = count;
}
static void fuse_release_user_pages(struct fuse_req *req, int write)
{
unsigned i;
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
if (write)
set_page_dirty_lock(page);
put_page(page);
}
}
static ssize_t fuse_get_res_by_io(struct fuse_io_priv *io)
{
if (io->err)
return io->err;
if (io->bytes >= 0 && io->write)
return -EIO;
return io->bytes < 0 ? io->size : io->bytes;
}
/**
* In case of short read, the caller sets 'pos' to the position of
* actual end of fuse request in IO request. Otherwise, if bytes_requested
* == bytes_transferred or rw == WRITE, the caller sets 'pos' to -1.
*
* An example:
* User requested DIO read of 64K. It was split into two 32K fuse requests,
* both submitted asynchronously. The first of them was ACKed by userspace as
* fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
* second request was ACKed as short, e.g. only 1K was read, resulting in
* pos == 33K.
*
* Thus, when all fuse requests are completed, the minimal non-negative 'pos'
* will be equal to the length of the longest contiguous fragment of
* transferred data starting from the beginning of IO request.
*/
static void fuse_aio_complete(struct fuse_io_priv *io, int err, ssize_t pos)
{
bool is_sync = is_sync_kiocb(io->iocb);
int left;
spin_lock(&io->lock);
if (err)
io->err = io->err ? : err;
else if (pos >= 0 && (io->bytes < 0 || pos < io->bytes))
io->bytes = pos;
left = --io->reqs;
if (!left && is_sync)
complete(io->done);
spin_unlock(&io->lock);
if (!left && !is_sync) {
ssize_t res = fuse_get_res_by_io(io);
if (res >= 0) {
struct inode *inode = file_inode(io->iocb->ki_filp);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
fi->attr_version = ++fc->attr_version;
spin_unlock(&fc->lock);
}
io->iocb->ki_complete(io->iocb, res, 0);
kfree(io);
}
}
static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req)
{
struct fuse_io_priv *io = req->io;
ssize_t pos = -1;
fuse_release_user_pages(req, !io->write);
if (io->write) {
if (req->misc.write.in.size != req->misc.write.out.size)
pos = req->misc.write.in.offset - io->offset +
req->misc.write.out.size;
} else {
if (req->misc.read.in.size != req->out.args[0].size)
pos = req->misc.read.in.offset - io->offset +
req->out.args[0].size;
}
fuse_aio_complete(io, req->out.h.error, pos);
}
static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req,
size_t num_bytes, struct fuse_io_priv *io)
{
spin_lock(&io->lock);
io->size += num_bytes;
io->reqs++;
spin_unlock(&io->lock);
req->io = io;
req->end = fuse_aio_complete_req;
__fuse_get_request(req);
fuse_request_send_background(fc, req);
return num_bytes;
}
static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io,
loff_t pos, size_t count, fl_owner_t owner)
{
struct file *file = io->file;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
fuse_read_fill(req, file, pos, count, FUSE_READ);
if (owner != NULL) {
struct fuse_read_in *inarg = &req->misc.read.in;
inarg->read_flags |= FUSE_READ_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
if (io->async)
return fuse_async_req_send(fc, req, count, io);
fuse_request_send(fc, req);
return req->out.args[0].size;
}
static void fuse_read_update_size(struct inode *inode, loff_t size,
u64 attr_ver)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
spin_lock(&fc->lock);
if (attr_ver == fi->attr_version && size < inode->i_size &&
!test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
fi->attr_version = ++fc->attr_version;
i_size_write(inode, size);
}
spin_unlock(&fc->lock);
}
static void fuse_short_read(struct fuse_req *req, struct inode *inode,
u64 attr_ver)
{
size_t num_read = req->out.args[0].size;
struct fuse_conn *fc = get_fuse_conn(inode);
if (fc->writeback_cache) {
/*
* A hole in a file. Some data after the hole is in the page cache,
* but has not reached the client fs yet, so the hole is not
* present there.
*/
int i;
int start_idx = num_read >> PAGE_CACHE_SHIFT;
size_t off = num_read & (PAGE_CACHE_SIZE - 1);
for (i = start_idx; i < req->num_pages; i++) {
zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE);
off = 0;
}
} else {
loff_t pos = page_offset(req->pages[0]) + num_read;
fuse_read_update_size(inode, pos, attr_ver);
}
}
static int fuse_do_readpage(struct file *file, struct page *page)
{
struct fuse_io_priv io = { .async = 0, .file = file };
struct inode *inode = page->mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_req *req;
size_t num_read;
loff_t pos = page_offset(page);
size_t count = PAGE_CACHE_SIZE;
u64 attr_ver;
int err;
/*
* Page writeback can extend beyond the lifetime of the
* page-cache page, so make sure we read a properly synced
* page.
*/
fuse_wait_on_page_writeback(inode, page->index);
req = fuse_get_req(fc, 1);
if (IS_ERR(req))
return PTR_ERR(req);
attr_ver = fuse_get_attr_version(fc);
req->out.page_zeroing = 1;
req->out.argpages = 1;
req->num_pages = 1;
req->pages[0] = page;
req->page_descs[0].length = count;
num_read = fuse_send_read(req, &io, pos, count, NULL);
err = req->out.h.error;
if (!err) {
/*
* Short read means EOF. If file size is larger, truncate it
*/
if (num_read < count)
fuse_short_read(req, inode, attr_ver);
SetPageUptodate(page);
}
fuse_put_request(fc, req);
return err;
}
static int fuse_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
int err;
err = -EIO;
if (is_bad_inode(inode))
goto out;
err = fuse_do_readpage(file, page);
fuse_invalidate_atime(inode);
out:
unlock_page(page);
return err;
}
static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
{
int i;
size_t count = req->misc.read.in.size;
size_t num_read = req->out.args[0].size;
struct address_space *mapping = NULL;
for (i = 0; mapping == NULL && i < req->num_pages; i++)
mapping = req->pages[i]->mapping;
if (mapping) {
struct inode *inode = mapping->host;
/*
* Short read means EOF. If file size is larger, truncate it
*/
if (!req->out.h.error && num_read < count)
fuse_short_read(req, inode, req->misc.read.attr_ver);
fuse_invalidate_atime(inode);
}
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
if (!req->out.h.error)
SetPageUptodate(page);
else
SetPageError(page);
unlock_page(page);
page_cache_release(page);
}
if (req->ff)
fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
loff_t pos = page_offset(req->pages[0]);
size_t count = req->num_pages << PAGE_CACHE_SHIFT;
req->out.argpages = 1;
req->out.page_zeroing = 1;
req->out.page_replace = 1;
fuse_read_fill(req, file, pos, count, FUSE_READ);
req->misc.read.attr_ver = fuse_get_attr_version(fc);
if (fc->async_read) {
req->ff = fuse_file_get(ff);
req->end = fuse_readpages_end;
fuse_request_send_background(fc, req);
} else {
fuse_request_send(fc, req);
fuse_readpages_end(fc, req);
fuse_put_request(fc, req);
}
}
struct fuse_fill_data {
struct fuse_req *req;
struct file *file;
struct inode *inode;
unsigned nr_pages;
};
static int fuse_readpages_fill(void *_data, struct page *page)
{
struct fuse_fill_data *data = _data;
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
fuse_wait_on_page_writeback(inode, page->index);
if (req->num_pages &&
(req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
(req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
req->pages[req->num_pages - 1]->index + 1 != page->index)) {
int nr_alloc = min_t(unsigned, data->nr_pages,
FUSE_MAX_PAGES_PER_REQ);
fuse_send_readpages(req, data->file);
if (fc->async_read)
req = fuse_get_req_for_background(fc, nr_alloc);
else
req = fuse_get_req(fc, nr_alloc);
data->req = req;
if (IS_ERR(req)) {
unlock_page(page);
return PTR_ERR(req);
}
}
if (WARN_ON(req->num_pages >= req->max_pages)) {
fuse_put_request(fc, req);
return -EIO;
}
page_cache_get(page);
req->pages[req->num_pages] = page;
req->page_descs[req->num_pages].length = PAGE_SIZE;
req->num_pages++;
data->nr_pages--;
return 0;
}
static int fuse_readpages(struct file *file, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_fill_data data;
int err;
int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ);
err = -EIO;
if (is_bad_inode(inode))
goto out;
data.file = file;
data.inode = inode;
if (fc->async_read)
data.req = fuse_get_req_for_background(fc, nr_alloc);
else
data.req = fuse_get_req(fc, nr_alloc);
data.nr_pages = nr_pages;
err = PTR_ERR(data.req);
if (IS_ERR(data.req))
goto out;
err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data);
if (!err) {
if (data.req->num_pages)
fuse_send_readpages(data.req, file);
else
fuse_put_request(fc, data.req);
}
out:
return err;
}
static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
/*
* In auto invalidate mode, always update attributes on read.
* Otherwise, only update if we attempt to read past EOF (to ensure
* i_size is up to date).
*/
if (fc->auto_inval_data ||
(iocb->ki_pos + iov_iter_count(to) > i_size_read(inode))) {
int err;
err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
if (err)
return err;
}
return generic_file_read_iter(iocb, to);
}
static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff,
loff_t pos, size_t count)
{
struct fuse_write_in *inarg = &req->misc.write.in;
struct fuse_write_out *outarg = &req->misc.write.out;
inarg->fh = ff->fh;
inarg->offset = pos;
inarg->size = count;
req->in.h.opcode = FUSE_WRITE;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 2;
if (ff->fc->minor < 9)
req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE;
else
req->in.args[0].size = sizeof(struct fuse_write_in);
req->in.args[0].value = inarg;
req->in.args[1].size = count;
req->out.numargs = 1;
req->out.args[0].size = sizeof(struct fuse_write_out);
req->out.args[0].value = outarg;
}
static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io,
loff_t pos, size_t count, fl_owner_t owner)
{
struct file *file = io->file;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_write_in *inarg = &req->misc.write.in;
fuse_write_fill(req, ff, pos, count);
inarg->flags = file->f_flags;
if (owner != NULL) {
inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
inarg->lock_owner = fuse_lock_owner_id(fc, owner);
}
if (io->async)
return fuse_async_req_send(fc, req, count, io);
fuse_request_send(fc, req);
return req->misc.write.out.size;
}
bool fuse_write_update_size(struct inode *inode, loff_t pos)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
bool ret = false;
spin_lock(&fc->lock);
fi->attr_version = ++fc->attr_version;
if (pos > inode->i_size) {
i_size_write(inode, pos);
ret = true;
}
spin_unlock(&fc->lock);
return ret;
}
static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
struct inode *inode, loff_t pos,
size_t count)
{
size_t res;
unsigned offset;
unsigned i;
struct fuse_io_priv io = { .async = 0, .file = file };
for (i = 0; i < req->num_pages; i++)
fuse_wait_on_page_writeback(inode, req->pages[i]->index);
res = fuse_send_write(req, &io, pos, count, NULL);
offset = req->page_descs[0].offset;
count = res;
for (i = 0; i < req->num_pages; i++) {
struct page *page = req->pages[i];
if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
SetPageUptodate(page);
if (count > PAGE_CACHE_SIZE - offset)
count -= PAGE_CACHE_SIZE - offset;
else
count = 0;
offset = 0;
unlock_page(page);
page_cache_release(page);
}
return res;
}
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
struct address_space *mapping,
struct iov_iter *ii, loff_t pos)
{
struct fuse_conn *fc = get_fuse_conn(mapping->host);
unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
size_t count = 0;
int err;
req->in.argpages = 1;
req->page_descs[0].offset = offset;
do {
size_t tmp;
struct page *page;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
iov_iter_count(ii));
bytes = min_t(size_t, bytes, fc->max_write - count);
again:
err = -EFAULT;
if (iov_iter_fault_in_readable(ii, bytes))
break;
err = -ENOMEM;
page = grab_cache_page_write_begin(mapping, index, 0);
if (!page)
break;
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
flush_dcache_page(page);
if (!tmp) {
unlock_page(page);
page_cache_release(page);
bytes = min(bytes, iov_iter_single_seg_count(ii));
goto again;
}
err = 0;
req->pages[req->num_pages] = page;
req->page_descs[req->num_pages].length = tmp;
req->num_pages++;
iov_iter_advance(ii, tmp);
count += tmp;
pos += tmp;
offset += tmp;
if (offset == PAGE_CACHE_SIZE)
offset = 0;
if (!fc->big_writes)
break;
} while (iov_iter_count(ii) && count < fc->max_write &&
req->num_pages < req->max_pages && offset == 0);
return count > 0 ? count : err;
}
static inline unsigned fuse_wr_pages(loff_t pos, size_t len)
{
return min_t(unsigned,
((pos + len - 1) >> PAGE_CACHE_SHIFT) -
(pos >> PAGE_CACHE_SHIFT) + 1,
FUSE_MAX_PAGES_PER_REQ);
}
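/*
* Worked example (illustrative): with a 4096-byte page size, a write of
* len == 4 at pos == 4094 touches bytes 4094..4097, i.e. the tail of
* page 0 and the head of page 1:
*   ((4094 + 4 - 1) >> 12) - (4094 >> 12) + 1 = 1 - 0 + 1 = 2 pages,
* capped at FUSE_MAX_PAGES_PER_REQ by the min_t() above.
*/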
static ssize_t fuse_perform_write(struct file *file,
struct address_space *mapping,
struct iov_iter *ii, loff_t pos)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
int err = 0;
ssize_t res = 0;
if (is_bad_inode(inode))
return -EIO;
if (inode->i_size < pos + iov_iter_count(ii))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
do {
struct fuse_req *req;
ssize_t count;
unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii));
req = fuse_get_req(fc, nr_pages);
if (IS_ERR(req)) {
err = PTR_ERR(req);
break;
}
count = fuse_fill_write_pages(req, mapping, ii, pos);
if (count <= 0) {
err = count;
} else {
size_t num_written;
num_written = fuse_send_write_pages(req, file, inode,
pos, count);
err = req->out.h.error;
if (!err) {
res += num_written;
pos += num_written;
/* break out of the loop on short write */
if (num_written != count)
err = -EIO;
}
}
fuse_put_request(fc, req);
} while (!err && iov_iter_count(ii));
if (res > 0)
fuse_write_update_size(inode, pos);
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
fuse_invalidate_attr(inode);
return res > 0 ? res : err;
}
static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
ssize_t written = 0;
ssize_t written_buffered = 0;
struct inode *inode = mapping->host;
ssize_t err;
loff_t endbyte = 0;
if (get_fuse_conn(inode)->writeback_cache) {
/* Update size (EOF optimization) and mode (SUID clearing) */
err = fuse_update_attributes(mapping->host, NULL, file, NULL);
if (err)
return err;
return generic_file_write_iter(iocb, from);
}
mutex_lock(&inode->i_mutex);
/* We can write back this queue in page reclaim */
current->backing_dev_info = inode_to_bdi(inode);
err = generic_write_checks(iocb, from);
if (err <= 0)
goto out;
err = file_remove_privs(file);
if (err)
goto out;
err = file_update_time(file);
if (err)
goto out;
if (iocb->ki_flags & IOCB_DIRECT) {
loff_t pos = iocb->ki_pos;
written = generic_file_direct_write(iocb, from, pos);
if (written < 0 || !iov_iter_count(from))
goto out;
pos += written;
written_buffered = fuse_perform_write(file, mapping, from, pos);
if (written_buffered < 0) {
err = written_buffered;
goto out;
}
endbyte = pos + written_buffered - 1;
err = filemap_write_and_wait_range(file->f_mapping, pos,
endbyte);
if (err)
goto out;
invalidate_mapping_pages(file->f_mapping,
pos >> PAGE_CACHE_SHIFT,
endbyte >> PAGE_CACHE_SHIFT);
written += written_buffered;
iocb->ki_pos = pos + written_buffered;
} else {
written = fuse_perform_write(file, mapping, from, iocb->ki_pos);
if (written >= 0)
iocb->ki_pos += written;
}
out:
current->backing_dev_info = NULL;
mutex_unlock(&inode->i_mutex);
return written ? written : err;
}
static inline void fuse_page_descs_length_init(struct fuse_req *req,
unsigned index, unsigned nr_pages)
{
int i;
for (i = index; i < index + nr_pages; i++)
req->page_descs[i].length = PAGE_SIZE -
req->page_descs[i].offset;
}
static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii)
{
return (unsigned long)ii->iov->iov_base + ii->iov_offset;
}
static inline size_t fuse_get_frag_size(const struct iov_iter *ii,
size_t max_size)
{
return min(iov_iter_single_seg_count(ii), max_size);
}
static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii,
size_t *nbytesp, int write)
{
size_t nbytes = 0; /* # bytes already packed in req */
/* Special case for kernel I/O: can copy directly into the buffer */
if (ii->type & ITER_KVEC) {
unsigned long user_addr = fuse_get_user_addr(ii);
size_t frag_size = fuse_get_frag_size(ii, *nbytesp);
if (write)
req->in.args[1].value = (void *) user_addr;
else
req->out.args[0].value = (void *) user_addr;
iov_iter_advance(ii, frag_size);
*nbytesp = frag_size;
return 0;
}
while (nbytes < *nbytesp && req->num_pages < req->max_pages) {
unsigned npages;
size_t start;
ssize_t ret = iov_iter_get_pages(ii,
&req->pages[req->num_pages],
*nbytesp - nbytes,
req->max_pages - req->num_pages,
&start);
if (ret < 0)
return ret;
iov_iter_advance(ii, ret);
nbytes += ret;
ret += start;
npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE;
req->page_descs[req->num_pages].offset = start;
fuse_page_descs_length_init(req, req->num_pages, npages);
req->num_pages += npages;
req->page_descs[req->num_pages - 1].length -=
(PAGE_SIZE - ret) & (PAGE_SIZE - 1);
}
if (write)
req->in.argpages = 1;
else
req->out.argpages = 1;
*nbytesp = nbytes;
return 0;
}
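/*
* Worked example (illustrative): suppose iov_iter_get_pages() returns
* ret == 5000 bytes starting at in-page offset start == 100. Then
* ret += start gives 5100, npages == 2, and the descriptors are first
* initialised to cover 4096 - 100 == 3996 and 4096 bytes. The final
* adjustment subtracts (4096 - 5100) & 4095 == 3092 from the last
* descriptor, leaving 1004 bytes there: 3996 + 1004 == 5000, exactly
* the payload actually pinned.
*/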
static inline int fuse_iter_npages(const struct iov_iter *ii_p)
{
return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ);
}
ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
loff_t *ppos, int flags)
{
int write = flags & FUSE_DIO_WRITE;
int cuse = flags & FUSE_DIO_CUSE;
struct file *file = io->file;
struct inode *inode = file->f_mapping->host;
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
size_t nmax = write ? fc->max_write : fc->max_read;
loff_t pos = *ppos;
size_t count = iov_iter_count(iter);
pgoff_t idx_from = pos >> PAGE_CACHE_SHIFT;
pgoff_t idx_to = (pos + count - 1) >> PAGE_CACHE_SHIFT;
ssize_t res = 0;
struct fuse_req *req;
if (io->async)
req = fuse_get_req_for_background(fc, fuse_iter_npages(iter));
else
req = fuse_get_req(fc, fuse_iter_npages(iter));
if (IS_ERR(req))
return PTR_ERR(req);
if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) {
if (!write)
mutex_lock(&inode->i_mutex);
fuse_sync_writes(inode);
if (!write)
mutex_unlock(&inode->i_mutex);
}
while (count) {
size_t nres;
fl_owner_t owner = current->files;
size_t nbytes = min(count, nmax);
int err = fuse_get_user_pages(req, iter, &nbytes, write);
if (err) {
res = err;
break;
}
if (write)
nres = fuse_send_write(req, io, pos, nbytes, owner);
else
nres = fuse_send_read(req, io, pos, nbytes, owner);
if (!io->async)
fuse_release_user_pages(req, !write);
if (req->out.h.error) {
if (!res)
res = req->out.h.error;
break;
} else if (nres > nbytes) {
res = -EIO;
break;
}
count -= nres;
res += nres;
pos += nres;
if (nres != nbytes)
break;
if (count) {
fuse_put_request(fc, req);
if (io->async)
req = fuse_get_req_for_background(fc,
fuse_iter_npages(iter));
else
req = fuse_get_req(fc, fuse_iter_npages(iter));
if (IS_ERR(req))
break;
}
}
if (!IS_ERR(req))
fuse_put_request(fc, req);
if (res > 0)
*ppos = pos;
return res;
}
EXPORT_SYMBOL_GPL(fuse_direct_io);
static ssize_t __fuse_direct_read(struct fuse_io_priv *io,
struct iov_iter *iter,
loff_t *ppos)
{
ssize_t res;
struct file *file = io->file;
struct inode *inode = file_inode(file);
if (is_bad_inode(inode))
return -EIO;
res = fuse_direct_io(io, iter, ppos, 0);
fuse_invalidate_attr(inode);
return res;
}
static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
struct fuse_io_priv io = { .async = 0, .file = iocb->ki_filp };
return __fuse_direct_read(&io, to, &iocb->ki_pos);
}
static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct fuse_io_priv io = { .async = 0, .file = file };
ssize_t res;
if (is_bad_inode(inode))
return -EIO;
/* Don't allow parallel writes to the same file */
mutex_lock(&inode->i_mutex);
res = generic_write_checks(iocb, from);
if (res > 0)
res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE);
fuse_invalidate_attr(inode);
if (res > 0)
fuse_write_update_size(inode, iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
return res;
}
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
int i;
for (i = 0; i < req->num_pages; i++)
__free_page(req->pages[i]);
if (req->ff)
fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
{
struct inode *inode = req->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
struct backing_dev_info *bdi = inode_to_bdi(inode);
int i;
list_del(&req->writepages_entry);
for (i = 0; i < req->num_pages; i++) {
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
}
wake_up(&fi->page_waitq);
}
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
loff_t size)
__releases(fc->lock)
__acquires(fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
struct fuse_write_in *inarg = &req->misc.write.in;
__u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
if (!fc->connected)
goto out_free;
if (inarg->offset + data_size <= size) {
inarg->size = data_size;
} else if (inarg->offset < size) {
inarg->size = size - inarg->offset;
} else {
/* Got truncated off completely */
goto out_free;
}
req->in.args[1].size = inarg->size;
fi->writectr++;
fuse_request_send_background_locked(fc, req);
return;
out_free:
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
fuse_put_request(fc, req);
spin_lock(&fc->lock);
}
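/*
* Worked example (illustrative): with i_size == 10000 and a queued
* request covering offset 8192 with data_size 8192, only bytes
* 8192..9999 still exist, so inarg->size is cropped to
* 10000 - 8192 == 1808. Had the file been truncated to 8192 bytes or
* fewer, the request would be dropped via out_free instead.
*/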
/*
* If fi->writectr is positive (no truncate or fsync going on) send
* all queued writepage requests.
*
* Called with fc->lock
*/
void fuse_flush_writepages(struct inode *inode)
__releases(fc->lock)
__acquires(fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
size_t crop = i_size_read(inode);
struct fuse_req *req;
while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
req = list_entry(fi->queued_writes.next, struct fuse_req, list);
list_del_init(&req->list);
fuse_send_writepage(fc, req, crop);
}
}
static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
{
struct inode *inode = req->inode;
struct fuse_inode *fi = get_fuse_inode(inode);
mapping_set_error(inode->i_mapping, req->out.h.error);
spin_lock(&fc->lock);
while (req->misc.write.next) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_write_in *inarg = &req->misc.write.in;
struct fuse_req *next = req->misc.write.next;
req->misc.write.next = next->misc.write.next;
next->misc.write.next = NULL;
next->ff = fuse_file_get(req->ff);
list_add(&next->writepages_entry, &fi->writepages);
/*
* Skip fuse_flush_writepages() to make it easy to crop requests
* based on primary request size.
*
* 1st case (trivial): there are no concurrent activities using
* fuse_set/release_nowrite. Then we're on the safe side because
* fuse_flush_writepages() would call fuse_send_writepage()
* anyway.
*
* 2nd case: someone called fuse_set_nowrite and it is waiting
* now for completion of all in-flight requests. This happens
* rarely and no more than once per page, so this should be
* okay.
*
* 3rd case: someone (e.g. fuse_do_setattr()) is in the middle
* of fuse_set_nowrite..fuse_release_nowrite section. The fact
* that fuse_set_nowrite returned implies that all in-flight
* requests were completed along with all of their secondary
* requests. Further primary requests are blocked by negative
* writectr. Hence there cannot be any in-flight requests and
* no invocations of fuse_writepage_end() while we're in
* fuse_set_nowrite..fuse_release_nowrite section.
*/
fuse_send_writepage(fc, next, inarg->offset + inarg->size);
}
fi->writectr--;
fuse_writepage_finish(fc, req);
spin_unlock(&fc->lock);
fuse_writepage_free(fc, req);
}
static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc,
struct fuse_inode *fi)
{
struct fuse_file *ff = NULL;
spin_lock(&fc->lock);
if (!list_empty(&fi->write_files)) {
ff = list_entry(fi->write_files.next, struct fuse_file,
write_entry);
fuse_file_get(ff);
}
spin_unlock(&fc->lock);
return ff;
}
static struct fuse_file *fuse_write_file_get(struct fuse_conn *fc,
struct fuse_inode *fi)
{
struct fuse_file *ff = __fuse_write_file_get(fc, fi);
WARN_ON(!ff);
return ff;
}
int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_file *ff;
int err;
ff = __fuse_write_file_get(fc, fi);
err = fuse_flush_times(inode, ff);
if (ff)
fuse_file_put(ff, 0);
return err;
}
static int fuse_writepage_locked(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_req *req;
struct page *tmp_page;
int error = -ENOMEM;
set_page_writeback(page);
req = fuse_request_alloc_nofs(1);
if (!req)
goto err;
/* writeback always goes to bg_queue */
__set_bit(FR_BACKGROUND, &req->flags);
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
goto err_free;
error = -EIO;
req->ff = fuse_write_file_get(fc, fi);
if (!req->ff)
goto err_nofile;
fuse_write_fill(req, req->ff, page_offset(page), 0);
copy_highpage(tmp_page, page);
req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
req->misc.write.next = NULL;
req->in.argpages = 1;
req->num_pages = 1;
req->pages[0] = tmp_page;
req->page_descs[0].offset = 0;
req->page_descs[0].length = PAGE_SIZE;
req->end = fuse_writepage_end;
req->inode = inode;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
spin_lock(&fc->lock);
list_add(&req->writepages_entry, &fi->writepages);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fc->lock);
end_page_writeback(page);
return 0;
err_nofile:
__free_page(tmp_page);
err_free:
fuse_request_free(req);
err:
end_page_writeback(page);
return error;
}
static int fuse_writepage(struct page *page, struct writeback_control *wbc)
{
int err;
if (fuse_page_is_writeback(page->mapping->host, page->index)) {
/*
* ->writepages() should be called for sync() and friends. We
* should only get here on direct reclaim and then we are
* allowed to skip a page which is already in flight
*/
WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
redirty_page_for_writepage(wbc, page);
return 0;
}
err = fuse_writepage_locked(page);
unlock_page(page);
return err;
}
struct fuse_fill_wb_data {
struct fuse_req *req;
struct fuse_file *ff;
struct inode *inode;
struct page **orig_pages;
};
static void fuse_writepages_send(struct fuse_fill_wb_data *data)
{
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
int num_pages = req->num_pages;
int i;
req->ff = fuse_file_get(data->ff);
spin_lock(&fc->lock);
list_add_tail(&req->list, &fi->queued_writes);
fuse_flush_writepages(inode);
spin_unlock(&fc->lock);
for (i = 0; i < num_pages; i++)
end_page_writeback(data->orig_pages[i]);
}
static bool fuse_writepage_in_flight(struct fuse_req *new_req,
struct page *page)
{
struct fuse_conn *fc = get_fuse_conn(new_req->inode);
struct fuse_inode *fi = get_fuse_inode(new_req->inode);
struct fuse_req *tmp;
struct fuse_req *old_req;
bool found = false;
pgoff_t curr_index;
BUG_ON(new_req->num_pages != 0);
spin_lock(&fc->lock);
list_del(&new_req->writepages_entry);
list_for_each_entry(old_req, &fi->writepages, writepages_entry) {
BUG_ON(old_req->inode != new_req->inode);
curr_index = old_req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
if (curr_index <= page->index &&
page->index < curr_index + old_req->num_pages) {
found = true;
break;
}
}
if (!found) {
list_add(&new_req->writepages_entry, &fi->writepages);
goto out_unlock;
}
new_req->num_pages = 1;
for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) {
BUG_ON(tmp->inode != new_req->inode);
curr_index = tmp->misc.write.in.offset >> PAGE_CACHE_SHIFT;
if (tmp->num_pages == 1 &&
curr_index == page->index) {
old_req = tmp;
}
}
if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) {
struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host);
copy_highpage(old_req->pages[0], page);
spin_unlock(&fc->lock);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
dec_zone_page_state(page, NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
goto out;
} else {
new_req->misc.write.next = old_req->misc.write.next;
old_req->misc.write.next = new_req;
}
out_unlock:
spin_unlock(&fc->lock);
out:
return found;
}
static int fuse_writepages_fill(struct page *page,
struct writeback_control *wbc, void *_data)
{
struct fuse_fill_wb_data *data = _data;
struct fuse_req *req = data->req;
struct inode *inode = data->inode;
struct fuse_conn *fc = get_fuse_conn(inode);
struct page *tmp_page;
bool is_writeback;
int err;
if (!data->ff) {
err = -EIO;
data->ff = fuse_write_file_get(fc, get_fuse_inode(inode));
if (!data->ff)
goto out_unlock;
}
/*
* Being under writeback is unlikely but possible. For example direct
* read to an mmaped fuse file will set the page dirty twice; once when
* the pages are faulted with get_user_pages(), and then after the read
* completed.
*/
is_writeback = fuse_page_is_writeback(inode, page->index);
if (req && req->num_pages &&
(is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
(req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write ||
data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) {
fuse_writepages_send(data);
data->req = NULL;
}
err = -ENOMEM;
tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
if (!tmp_page)
goto out_unlock;
/*
* The page must not be redirtied until the writeout is completed
* (i.e. userspace has sent a reply to the write request). Otherwise
* there could be more than one temporary page instance for each real
* page.
*
* This is ensured by holding the page lock in page_mkwrite() while
* checking fuse_page_is_writeback(). We already hold the page lock
* since clear_page_dirty_for_io() and keep it held until we add the
* request to the fi->writepages list and increment req->num_pages.
* After this fuse_page_is_writeback() will indicate that the page is
* under writeback, so we can release the page lock.
*/
if (data->req == NULL) {
struct fuse_inode *fi = get_fuse_inode(inode);
err = -ENOMEM;
req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ);
if (!req) {
__free_page(tmp_page);
goto out_unlock;
}
fuse_write_fill(req, data->ff, page_offset(page), 0);
req->misc.write.in.write_flags |= FUSE_WRITE_CACHE;
req->misc.write.next = NULL;
req->in.argpages = 1;
__set_bit(FR_BACKGROUND, &req->flags);
req->num_pages = 0;
req->end = fuse_writepage_end;
req->inode = inode;
spin_lock(&fc->lock);
list_add(&req->writepages_entry, &fi->writepages);
spin_unlock(&fc->lock);
data->req = req;
}
set_page_writeback(page);
copy_highpage(tmp_page, page);
req->pages[req->num_pages] = tmp_page;
req->page_descs[req->num_pages].offset = 0;
req->page_descs[req->num_pages].length = PAGE_SIZE;
inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
err = 0;
if (is_writeback && fuse_writepage_in_flight(req, page)) {
end_page_writeback(page);
data->req = NULL;
goto out_unlock;
}
data->orig_pages[req->num_pages] = page;
/*
* Protected by fc->lock against concurrent access by
* fuse_page_is_writeback().
*/
spin_lock(&fc->lock);
req->num_pages++;
spin_unlock(&fc->lock);
out_unlock:
unlock_page(page);
return err;
}
static int fuse_writepages(struct address_space *mapping,
struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
struct fuse_fill_wb_data data;
int err;
err = -EIO;
if (is_bad_inode(inode))
goto out;
data.inode = inode;
data.req = NULL;
data.ff = NULL;
err = -ENOMEM;
data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
sizeof(struct page *),
GFP_NOFS);
if (!data.orig_pages)
goto out;
err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data);
if (data.req) {
/* Ignore errors if we can write at least one page */
BUG_ON(!data.req->num_pages);
fuse_writepages_send(&data);
err = 0;
}
if (data.ff)
fuse_file_put(data.ff, false);
kfree(data.orig_pages);
out:
return err;
}
/*
* It would be worthwhile to make sure that space is reserved on disk for
* the write, but implementing that without killing performance needs
* more thought.
*/
static int fuse_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
struct fuse_conn *fc = get_fuse_conn(file_inode(file));
struct page *page;
loff_t fsize;
int err = -ENOMEM;
WARN_ON(!fc->writeback_cache);
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
goto error;
fuse_wait_on_page_writeback(mapping->host, page->index);
if (PageUptodate(page) || len == PAGE_CACHE_SIZE)
goto success;
/*
* Check if the start of this page comes after the end of file, in which
* case the readpage can be optimized away.
*/
fsize = i_size_read(mapping->host);
if (fsize <= (pos & PAGE_CACHE_MASK)) {
size_t off = pos & ~PAGE_CACHE_MASK;
if (off)
zero_user_segment(page, 0, off);
goto success;
}
err = fuse_do_readpage(file, page);
if (err)
goto cleanup;
success:
*pagep = page;
return 0;
cleanup:
unlock_page(page);
page_cache_release(page);
error:
return err;
}
static int fuse_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = page->mapping->host;
if (!PageUptodate(page)) {
/* Zero any unwritten bytes at the end of the page */
size_t endoff = (pos + copied) & ~PAGE_CACHE_MASK;
if (endoff)
zero_user_segment(page, endoff, PAGE_CACHE_SIZE);
SetPageUptodate(page);
}
fuse_write_update_size(inode, pos + copied);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
return copied;
}
static int fuse_launder_page(struct page *page)
{
int err = 0;
if (clear_page_dirty_for_io(page)) {
struct inode *inode = page->mapping->host;
err = fuse_writepage_locked(page);
if (!err)
fuse_wait_on_page_writeback(inode, page->index);
}
return err;
}
/*
* Write back dirty pages now, because there may not be any suitable
* open files later
*/
static void fuse_vma_close(struct vm_area_struct *vma)
{
filemap_write_and_wait(vma->vm_file->f_mapping);
}
/*
* Wait for writeback against this page to complete before allowing it
* to be marked dirty again, and hence written back again, possibly
* before the previous writepage completed.
*
* Block here, instead of in ->writepage(), so that the userspace fs
* can only block processes actually operating on the filesystem.
*
* Otherwise unprivileged userspace fs would be able to block
* unrelated:
*
* - page migration
* - sync(2)
* - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
*/
static int fuse_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct inode *inode = file_inode(vma->vm_file);
file_update_time(vma->vm_file);
lock_page(page);
if (page->mapping != inode->i_mapping) {
unlock_page(page);
return VM_FAULT_NOPAGE;
}
fuse_wait_on_page_writeback(inode, page->index);
return VM_FAULT_LOCKED;
}
static const struct vm_operations_struct fuse_file_vm_ops = {
.close = fuse_vma_close,
.fault = filemap_fault,
.map_pages = filemap_map_pages,
.page_mkwrite = fuse_page_mkwrite,
};
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
fuse_link_write_file(file);
file_accessed(file);
vma->vm_ops = &fuse_file_vm_ops;
return 0;
}
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
/* Can't provide the coherency needed for MAP_SHARED */
if (vma->vm_flags & VM_MAYSHARE)
return -ENODEV;
invalidate_inode_pages2(file->f_mapping);
return generic_file_mmap(file, vma);
}
static int convert_fuse_file_lock(const struct fuse_file_lock *ffl,
struct file_lock *fl)
{
switch (ffl->type) {
case F_UNLCK:
break;
case F_RDLCK:
case F_WRLCK:
if (ffl->start > OFFSET_MAX || ffl->end > OFFSET_MAX ||
ffl->end < ffl->start)
return -EIO;
fl->fl_start = ffl->start;
fl->fl_end = ffl->end;
fl->fl_pid = ffl->pid;
break;
default:
return -EIO;
}
fl->fl_type = ffl->type;
return 0;
}
static void fuse_lk_fill(struct fuse_args *args, struct file *file,
const struct file_lock *fl, int opcode, pid_t pid,
int flock, struct fuse_lk_in *inarg)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_file *ff = file->private_data;
memset(inarg, 0, sizeof(*inarg));
inarg->fh = ff->fh;
inarg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
inarg->lk.start = fl->fl_start;
inarg->lk.end = fl->fl_end;
inarg->lk.type = fl->fl_type;
inarg->lk.pid = pid;
if (flock)
inarg->lk_flags |= FUSE_LK_FLOCK;
args->in.h.opcode = opcode;
args->in.h.nodeid = get_node_id(inode);
args->in.numargs = 1;
args->in.args[0].size = sizeof(*inarg);
args->in.args[0].value = inarg;
}
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_lk_in inarg;
struct fuse_lk_out outarg;
int err;
fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg);
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
err = convert_fuse_file_lock(&outarg.lk, fl);
return err;
}
static int fuse_setlk(struct file *file, struct file_lock *fl, int flock)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_lk_in inarg;
int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK;
pid_t pid = fl->fl_type != F_UNLCK ? current->tgid : 0;
int err;
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
/* NLM needs asynchronous locks, which we don't support yet */
return -ENOLCK;
}
/* Unlock on close is handled by the flush method */
if (fl->fl_flags & FL_CLOSE)
return 0;
fuse_lk_fill(&args, file, fl, opcode, pid, flock, &inarg);
err = fuse_simple_request(fc, &args);
/* locking is restartable */
if (err == -EINTR)
err = -ERESTARTSYS;
return err;
}
static int fuse_file_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (cmd == F_CANCELLK) {
err = 0;
} else if (cmd == F_GETLK) {
if (fc->no_lock) {
posix_test_lock(file, fl);
err = 0;
} else
err = fuse_getlk(file, fl);
} else {
if (fc->no_lock)
err = posix_lock_file(file, fl, NULL);
else
err = fuse_setlk(file, fl, 0);
}
return err;
}
static int fuse_file_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
int err;
if (fc->no_flock) {
err = flock_lock_file_wait(file, fl);
} else {
struct fuse_file *ff = file->private_data;
/* emulate flock with POSIX locks */
ff->flock = true;
err = fuse_setlk(file, fl, 1);
}
return err;
}
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
struct inode *inode = mapping->host;
struct fuse_conn *fc = get_fuse_conn(inode);
FUSE_ARGS(args);
struct fuse_bmap_in inarg;
struct fuse_bmap_out outarg;
int err;
if (!inode->i_sb->s_bdev || fc->no_bmap)
return 0;
memset(&inarg, 0, sizeof(inarg));
inarg.block = block;
inarg.blocksize = inode->i_sb->s_blocksize;
args.in.h.opcode = FUSE_BMAP;
args.in.h.nodeid = get_node_id(inode);
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS)
fc->no_bmap = 1;
return err ? 0 : outarg.block;
}
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int whence)
{
loff_t retval;
struct inode *inode = file_inode(file);
/* No i_mutex protection necessary for SEEK_CUR and SEEK_SET */
if (whence == SEEK_CUR || whence == SEEK_SET)
return generic_file_llseek(file, offset, whence);
mutex_lock(&inode->i_mutex);
retval = fuse_update_attributes(inode, NULL, file, NULL);
if (!retval)
retval = generic_file_llseek(file, offset, whence);
mutex_unlock(&inode->i_mutex);
return retval;
}
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
unsigned int nr_segs, size_t bytes, bool to_user)
{
struct iov_iter ii;
int page_idx = 0;
if (!bytes)
return 0;
iov_iter_init(&ii, to_user ? READ : WRITE, iov, nr_segs, bytes);
while (iov_iter_count(&ii)) {
struct page *page = pages[page_idx++];
size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
void *kaddr;
kaddr = kmap(page);
while (todo) {
char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
size_t iov_len = ii.iov->iov_len - ii.iov_offset;
size_t copy = min(todo, iov_len);
size_t left;
if (!to_user)
left = copy_from_user(kaddr, uaddr, copy);
else
left = copy_to_user(uaddr, kaddr, copy);
if (unlikely(left)) {
/* don't leak the kmap on the error path */
kunmap(page);
return -EFAULT;
}
iov_iter_advance(&ii, copy);
todo -= copy;
kaddr += copy;
}
kunmap(page);
}
return 0;
}
/*
* CUSE servers compiled on 32bit broke on 64bit kernels because the
* ABI was defined to be 'struct iovec' which is different on 32bit
* and 64bit. Fortunately we can determine which structure the server
* used from the size of the reply.
*/
static int fuse_copy_ioctl_iovec_old(struct iovec *dst, void *src,
size_t transferred, unsigned count,
bool is_compat)
{
#ifdef CONFIG_COMPAT
if (count * sizeof(struct compat_iovec) == transferred) {
struct compat_iovec *ciov = src;
unsigned i;
/*
* With this interface a 32bit server cannot support
* non-compat (i.e. ones coming from 64bit apps) ioctl
* requests.
*/
if (!is_compat)
return -EINVAL;
for (i = 0; i < count; i++) {
dst[i].iov_base = compat_ptr(ciov[i].iov_base);
dst[i].iov_len = ciov[i].iov_len;
}
return 0;
}
#endif
if (count * sizeof(struct iovec) != transferred)
return -EIO;
memcpy(dst, src, transferred);
return 0;
}
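/*
 * Worked example of the size-based detection above (a sketch; the
 * concrete sizes assume a typical LP64 kernel, where
 * sizeof(struct iovec) == 16 and sizeof(struct compat_iovec) == 8):
 * with count == 2, a reply of 16 bytes can only have come from a
 * compat (32bit) server, while a reply of 32 bytes indicates native
 * struct iovec.  Any other size is rejected with -EIO.
 */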
/* Make sure iov_length() won't overflow */
static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
{
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
}
return 0;
}
static int fuse_copy_ioctl_iovec(struct fuse_conn *fc, struct iovec *dst,
void *src, size_t transferred, unsigned count,
bool is_compat)
{
unsigned i;
struct fuse_ioctl_iovec *fiov = src;
if (fc->minor < 16) {
return fuse_copy_ioctl_iovec_old(dst, src, transferred,
count, is_compat);
}
if (count * sizeof(struct fuse_ioctl_iovec) != transferred)
return -EIO;
for (i = 0; i < count; i++) {
/* Did the server supply an inappropriate value? */
if (fiov[i].base != (unsigned long) fiov[i].base ||
fiov[i].len != (unsigned long) fiov[i].len)
return -EIO;
dst[i].iov_base = (void __user *) (unsigned long) fiov[i].base;
dst[i].iov_len = (size_t) fiov[i].len;
#ifdef CONFIG_COMPAT
if (is_compat &&
(ptr_to_compat(dst[i].iov_base) != fiov[i].base ||
(compat_size_t) dst[i].iov_len != fiov[i].len))
return -EIO;
#endif
}
return 0;
}
/*
* For ioctls, there is no generic way to determine how much memory
* needs to be read and/or written. Furthermore, ioctls are allowed
* to dereference the passed pointer, so the parameter requires deep
* copying but FUSE has no idea whatsoever about what to copy in or
* out.
*
* This is solved by allowing FUSE server to retry ioctl with
* necessary in/out iovecs. Let's assume the ioctl implementation
* needs to read in the following structure.
*
* struct a {
* char *buf;
* size_t buflen;
* }
*
* On the first callout to FUSE server, inarg->in_size and
* inarg->out_size will be zero; then, the server completes the ioctl
* with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and
* the actual iov array to
*
* { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } }
*
* which tells FUSE to copy in the requested area and retry the ioctl.
* On the second round, the server has access to the structure and
* from that it can tell what to look for next, so on this invocation,
* it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to
*
* { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) },
* { .iov_base = a.buf, .iov_len = a.buflen } }
*
* FUSE will copy both struct a and the pointed buffer from the
* process doing the ioctl and retry ioctl with both struct a and the
* buffer.
*
* This time, FUSE server has everything it needs and completes ioctl
* without FUSE_IOCTL_RETRY which finishes the ioctl call.
*
* Copying data out works the same way.
*
* Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel
* automatically initializes in and out iovs by decoding @cmd with
* _IOC_* macros and the server is not allowed to request RETRY. This
* limits ioctl data transfers to well-formed ioctls and is the forced
* behavior for all FUSE servers.
*/
long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
unsigned int flags)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_ioctl_in inarg = {
.fh = ff->fh,
.cmd = cmd,
.arg = arg,
.flags = flags
};
struct fuse_ioctl_out outarg;
struct fuse_req *req = NULL;
struct page **pages = NULL;
struct iovec *iov_page = NULL;
struct iovec *in_iov = NULL, *out_iov = NULL;
unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages;
size_t in_size, out_size, transferred;
int err;
#if BITS_PER_LONG == 32
inarg.flags |= FUSE_IOCTL_32BIT;
#else
if (flags & FUSE_IOCTL_COMPAT)
inarg.flags |= FUSE_IOCTL_32BIT;
#endif
/* assume all the iovs returned by the client always fit in a page */
BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE);
err = -ENOMEM;
pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL);
iov_page = (struct iovec *) __get_free_page(GFP_KERNEL);
if (!pages || !iov_page)
goto out;
/*
* If restricted, initialize IO parameters as encoded in @cmd.
* RETRY from server is not allowed.
*/
if (!(flags & FUSE_IOCTL_UNRESTRICTED)) {
struct iovec *iov = iov_page;
iov->iov_base = (void __user *)arg;
iov->iov_len = _IOC_SIZE(cmd);
if (_IOC_DIR(cmd) & _IOC_WRITE) {
in_iov = iov;
in_iovs = 1;
}
if (_IOC_DIR(cmd) & _IOC_READ) {
out_iov = iov;
out_iovs = 1;
}
}
retry:
inarg.in_size = in_size = iov_length(in_iov, in_iovs);
inarg.out_size = out_size = iov_length(out_iov, out_iovs);
/*
* The out buffer is used either for actual out data or for retry
* iovecs; make sure there is always at least one page.
*/
out_size = max_t(size_t, out_size, PAGE_SIZE);
max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE);
/* make sure there are enough buffer pages and init request with them */
err = -ENOMEM;
if (max_pages > FUSE_MAX_PAGES_PER_REQ)
goto out;
while (num_pages < max_pages) {
pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
if (!pages[num_pages])
goto out;
num_pages++;
}
req = fuse_get_req(fc, num_pages);
if (IS_ERR(req)) {
err = PTR_ERR(req);
req = NULL;
goto out;
}
memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages);
req->num_pages = num_pages;
fuse_page_descs_length_init(req, 0, req->num_pages);
/* okay, let's send it to the client */
req->in.h.opcode = FUSE_IOCTL;
req->in.h.nodeid = ff->nodeid;
req->in.numargs = 1;
req->in.args[0].size = sizeof(inarg);
req->in.args[0].value = &inarg;
if (in_size) {
req->in.numargs++;
req->in.args[1].size = in_size;
req->in.argpages = 1;
err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size,
false);
if (err)
goto out;
}
req->out.numargs = 2;
req->out.args[0].size = sizeof(outarg);
req->out.args[0].value = &outarg;
req->out.args[1].size = out_size;
req->out.argpages = 1;
req->out.argvar = 1;
fuse_request_send(fc, req);
err = req->out.h.error;
transferred = req->out.args[1].size;
fuse_put_request(fc, req);
req = NULL;
if (err)
goto out;
/* did it ask for retry? */
if (outarg.flags & FUSE_IOCTL_RETRY) {
void *vaddr;
/* no retry if in restricted mode */
err = -EIO;
if (!(flags & FUSE_IOCTL_UNRESTRICTED))
goto out;
in_iovs = outarg.in_iovs;
out_iovs = outarg.out_iovs;
/*
* Make sure the counts are within bounds; the separate checks
* protect against overflow.
*/
err = -ENOMEM;
if (in_iovs > FUSE_IOCTL_MAX_IOV ||
out_iovs > FUSE_IOCTL_MAX_IOV ||
in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
goto out;
vaddr = kmap_atomic(pages[0]);
err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
transferred, in_iovs + out_iovs,
(flags & FUSE_IOCTL_COMPAT) != 0);
kunmap_atomic(vaddr);
if (err)
goto out;
in_iov = iov_page;
out_iov = in_iov + in_iovs;
err = fuse_verify_ioctl_iov(in_iov, in_iovs);
if (err)
goto out;
err = fuse_verify_ioctl_iov(out_iov, out_iovs);
if (err)
goto out;
goto retry;
}
err = -EIO;
if (transferred > inarg.out_size)
goto out;
err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true);
out:
if (req)
fuse_put_request(fc, req);
free_page((unsigned long) iov_page);
while (num_pages)
__free_page(pages[--num_pages]);
kfree(pages);
return err ? err : outarg.result;
}
EXPORT_SYMBOL_GPL(fuse_do_ioctl);
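/*
 * A minimal sketch of the server side of the retry protocol documented
 * above fuse_do_ioctl().  This is illustrative userspace code, not part
 * of this file; it assumes libfuse's low-level API (the handler
 * signature and the fuse_reply_ioctl_retry() helper), and struct a is
 * the hypothetical argument type from the example above.
 *
 *	static void a_ioctl(fuse_req_t req, fuse_ino_t ino, int cmd,
 *			    void *arg, struct fuse_file_info *fi,
 *			    unsigned flags, const void *in_buf,
 *			    size_t in_bufsz, size_t out_bufsz)
 *	{
 *		if (in_bufsz < sizeof(struct a)) {
 *			// First pass: ask the kernel to copy in struct a.
 *			struct iovec iov = {
 *				.iov_base = arg,
 *				.iov_len = sizeof(struct a),
 *			};
 *			fuse_reply_ioctl_retry(req, &iov, 1, NULL, 0);
 *		} else {
 *			// Second pass: struct a (and, after a further
 *			// retry, the buffer it points to) is in in_buf.
 *		}
 *	}
 */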
long fuse_ioctl_common(struct file *file, unsigned int cmd,
unsigned long arg, unsigned int flags)
{
struct inode *inode = file_inode(file);
struct fuse_conn *fc = get_fuse_conn(inode);
if (!fuse_allow_current_process(fc))
return -EACCES;
if (is_bad_inode(inode))
return -EIO;
return fuse_do_ioctl(file, cmd, arg, flags);
}
static long fuse_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_ioctl_common(file, cmd, arg, 0);
}
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
return fuse_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
/*
* All files which have been polled are linked to RB tree
* fuse_conn->polled_files which is indexed by kh. Walk the tree and
* find the matching one.
*/
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
struct rb_node **parent_out)
{
struct rb_node **link = &fc->polled_files.rb_node;
struct rb_node *last = NULL;
while (*link) {
struct fuse_file *ff;
last = *link;
ff = rb_entry(last, struct fuse_file, polled_node);
if (kh < ff->kh)
link = &last->rb_left;
else if (kh > ff->kh)
link = &last->rb_right;
else
return link;
}
if (parent_out)
*parent_out = last;
return link;
}
/*
* The file is about to be polled. Make sure it's on the polled_files
* RB tree. Note that files once added to the polled_files tree are
* not removed before the file is released. This is because a file
* polled once is likely to be polled again.
*/
static void fuse_register_polled_file(struct fuse_conn *fc,
struct fuse_file *ff)
{
spin_lock(&fc->lock);
if (RB_EMPTY_NODE(&ff->polled_node)) {
struct rb_node **link, *uninitialized_var(parent);
link = fuse_find_polled_node(fc, ff->kh, &parent);
BUG_ON(*link);
rb_link_node(&ff->polled_node, parent, link);
rb_insert_color(&ff->polled_node, &fc->polled_files);
}
spin_unlock(&fc->lock);
}
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
struct fuse_file *ff = file->private_data;
struct fuse_conn *fc = ff->fc;
struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
struct fuse_poll_out outarg;
FUSE_ARGS(args);
int err;
if (fc->no_poll)
return DEFAULT_POLLMASK;
poll_wait(file, &ff->poll_wait, wait);
inarg.events = (__u32)poll_requested_events(wait);
/*
* Ask for notification iff there's someone waiting for it.
* The client may ignore the flag and always notify.
*/
if (waitqueue_active(&ff->poll_wait)) {
inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
fuse_register_polled_file(fc, ff);
}
args.in.h.opcode = FUSE_POLL;
args.in.h.nodeid = ff->nodeid;
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
return outarg.revents;
if (err == -ENOSYS) {
fc->no_poll = 1;
return DEFAULT_POLLMASK;
}
return POLLERR;
}
EXPORT_SYMBOL_GPL(fuse_file_poll);
/*
* This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and
* wakes up the poll waiters.
*/
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
struct fuse_notify_poll_wakeup_out *outarg)
{
u64 kh = outarg->kh;
struct rb_node **link;
spin_lock(&fc->lock);
link = fuse_find_polled_node(fc, kh, NULL);
if (*link) {
struct fuse_file *ff;
ff = rb_entry(*link, struct fuse_file, polled_node);
wake_up_interruptible_sync(&ff->poll_wait);
}
spin_unlock(&fc->lock);
return 0;
}
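/*
 * For context, the server side of the poll round trip (a sketch, not
 * part of this file): when the state of a file polled with
 * FUSE_POLL_SCHEDULE_NOTIFY changes, the server sends a
 * FUSE_NOTIFY_POLL message carrying the kh it was given, which ends up
 * in fuse_notify_poll_wakeup() above and wakes the waiters.  With
 * libfuse's low-level API this is roughly:
 *
 *	// in the server's poll handler, keep the handle around
 *	struct fuse_pollhandle *ph_saved = ph;
 *	// later, when the file becomes ready
 *	fuse_lowlevel_notify_poll(ph_saved);
 */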
static void fuse_do_truncate(struct file *file)
{
struct inode *inode = file->f_mapping->host;
struct iattr attr;
attr.ia_valid = ATTR_SIZE;
attr.ia_size = i_size_read(inode);
attr.ia_file = file;
attr.ia_valid |= ATTR_FILE;
fuse_do_setattr(inode, &attr, file);
}
static inline loff_t fuse_round_up(loff_t off)
{
return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
}
static ssize_t
fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
DECLARE_COMPLETION_ONSTACK(wait);
ssize_t ret = 0;
struct file *file = iocb->ki_filp;
struct fuse_file *ff = file->private_data;
bool async_dio = ff->fc->async_dio;
loff_t pos = 0;
struct inode *inode;
loff_t i_size;
size_t count = iov_iter_count(iter);
struct fuse_io_priv *io;
pos = offset;
inode = file->f_mapping->host;
i_size = i_size_read(inode);
if ((iov_iter_rw(iter) == READ) && (offset > i_size))
return 0;
/* optimization for short read */
if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) {
if (offset >= i_size)
return 0;
iov_iter_truncate(iter, fuse_round_up(i_size - offset));
count = iov_iter_count(iter);
}
io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
if (!io)
return -ENOMEM;
spin_lock_init(&io->lock);
io->reqs = 1;
io->bytes = -1;
io->size = 0;
io->offset = offset;
io->write = (iov_iter_rw(iter) == WRITE);
io->err = 0;
io->file = file;
/*
* By default, we want to optimize all I/Os with async request
* submission to the client filesystem if supported.
*/
io->async = async_dio;
io->iocb = iocb;
/*
* We cannot asynchronously extend the size of a file. We have no method
* to wait on real async I/O requests, so we must submit this request
* synchronously.
*/
if (!is_sync_kiocb(iocb) && (offset + count > i_size) &&
iov_iter_rw(iter) == WRITE)
io->async = false;
if (io->async && is_sync_kiocb(iocb))
io->done = &wait;
if (iov_iter_rw(iter) == WRITE) {
ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
fuse_invalidate_attr(inode);
} else {
ret = __fuse_direct_read(io, iter, &pos);
}
if (io->async) {
fuse_aio_complete(io, ret < 0 ? ret : 0, -1);
/* we have a non-extending, async request, so return */
if (!is_sync_kiocb(iocb))
return -EIOCBQUEUED;
wait_for_completion(&wait);
ret = fuse_get_res_by_io(io);
}
kfree(io);
if (iov_iter_rw(iter) == WRITE) {
if (ret > 0)
fuse_write_update_size(inode, pos);
else if (ret < 0 && offset + count > i_size)
fuse_do_truncate(file);
}
return ret;
}
static long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
loff_t length)
{
struct fuse_file *ff = file->private_data;
struct inode *inode = file_inode(file);
struct fuse_inode *fi = get_fuse_inode(inode);
struct fuse_conn *fc = ff->fc;
FUSE_ARGS(args);
struct fuse_fallocate_in inarg = {
.fh = ff->fh,
.offset = offset,
.length = length,
.mode = mode
};
int err;
bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
(mode & FALLOC_FL_PUNCH_HOLE);
if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (fc->no_fallocate)
return -EOPNOTSUPP;
if (lock_inode) {
mutex_lock(&inode->i_mutex);
if (mode & FALLOC_FL_PUNCH_HOLE) {
loff_t endbyte = offset + length - 1;
err = filemap_write_and_wait_range(inode->i_mapping,
offset, endbyte);
if (err)
goto out;
fuse_sync_writes(inode);
}
}
if (!(mode & FALLOC_FL_KEEP_SIZE))
set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
args.in.h.opcode = FUSE_FALLOCATE;
args.in.h.nodeid = ff->nodeid;
args.in.numargs = 1;
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
err = fuse_simple_request(fc, &args);
if (err == -ENOSYS) {
fc->no_fallocate = 1;
err = -EOPNOTSUPP;
}
if (err)
goto out;
/* we could have extended the file */
if (!(mode & FALLOC_FL_KEEP_SIZE)) {
bool changed = fuse_write_update_size(inode, offset + length);
if (changed && fc->writeback_cache)
file_update_time(file);
}
if (mode & FALLOC_FL_PUNCH_HOLE)
truncate_pagecache_range(inode, offset, offset + length - 1);
fuse_invalidate_attr(inode);
out:
if (!(mode & FALLOC_FL_KEEP_SIZE))
clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
if (lock_inode)
mutex_unlock(&inode->i_mutex);
return err;
}
static const struct file_operations fuse_file_operations = {
.llseek = fuse_file_llseek,
.read_iter = fuse_file_read_iter,
.write_iter = fuse_file_write_iter,
.mmap = fuse_file_mmap,
.open = fuse_open,
.flush = fuse_flush,
.release = fuse_release,
.fsync = fuse_fsync,
.lock = fuse_file_lock,
.flock = fuse_file_flock,
.splice_read = generic_file_splice_read,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
.fallocate = fuse_file_fallocate,
};
static const struct file_operations fuse_direct_io_file_operations = {
.llseek = fuse_file_llseek,
.read_iter = fuse_direct_read_iter,
.write_iter = fuse_direct_write_iter,
.mmap = fuse_direct_mmap,
.open = fuse_open,
.flush = fuse_flush,
.release = fuse_release,
.fsync = fuse_fsync,
.lock = fuse_file_lock,
.flock = fuse_file_flock,
.unlocked_ioctl = fuse_file_ioctl,
.compat_ioctl = fuse_file_compat_ioctl,
.poll = fuse_file_poll,
.fallocate = fuse_file_fallocate,
/* no splice_read */
};
static const struct address_space_operations fuse_file_aops = {
.readpage = fuse_readpage,
.writepage = fuse_writepage,
.writepages = fuse_writepages,
.launder_page = fuse_launder_page,
.readpages = fuse_readpages,
.set_page_dirty = __set_page_dirty_nobuffers,
.bmap = fuse_bmap,
.direct_IO = fuse_direct_IO,
.write_begin = fuse_write_begin,
.write_end = fuse_write_end,
};
void fuse_init_file_inode(struct inode *inode)
{
inode->i_fop = &fuse_file_operations;
inode->i_data.a_ops = &fuse_file_aops;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_1806_0 |
crossvul-cpp_data_bad_5768_0 | /*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "iodev.h"
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include "coalesced_mmio.h"
#include "async_pf.h"
#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
/*
* Ordering of locks:
*
* kvm->lock --> kvm->slots_lock --> kvm->irq_lock
*/
DEFINE_RAW_SPINLOCK(kvm_lock);
LIST_HEAD(vm_list);
static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count = 0;
static atomic_t hardware_enable_failed;
struct kmem_cache *kvm_vcpu_cache;
EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
static __read_mostly struct preempt_ops kvm_preempt_ops;
struct dentry *kvm_debugfs_dir;
static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);
static bool largepages_enabled = true;
bool kvm_is_mmio_pfn(pfn_t pfn)
{
if (pfn_valid(pfn)) {
int reserved;
struct page *tail = pfn_to_page(pfn);
struct page *head = compound_trans_head(tail);
reserved = PageReserved(head);
if (head != tail) {
/*
* "head" is not a dangling pointer
* (compound_trans_head takes care of that)
* but the hugepage may have been splitted
* from under us (and we may not hold a
* reference count on the head page so it can
* be reused before we run PageReferenced), so
* we've to check PageTail before returning
* what we just read.
*/
smp_rmb();
if (PageTail(tail))
return reserved;
}
return PageReserved(tail);
}
return true;
}
/*
* Switches to specified vcpu, until a matching vcpu_put()
*/
void vcpu_load(struct kvm_vcpu *vcpu)
{
int cpu;
mutex_lock(&vcpu->mutex);
if (unlikely(vcpu->pid != current->pids[PIDTYPE_PID].pid)) {
/* The thread running this VCPU changed. */
struct pid *oldpid = vcpu->pid;
struct pid *newpid = get_task_pid(current, PIDTYPE_PID);
rcu_assign_pointer(vcpu->pid, newpid);
synchronize_rcu();
put_pid(oldpid);
}
cpu = get_cpu();
preempt_notifier_register(&vcpu->preempt_notifier);
kvm_arch_vcpu_load(vcpu, cpu);
put_cpu();
}
void vcpu_put(struct kvm_vcpu *vcpu)
{
preempt_disable();
kvm_arch_vcpu_put(vcpu);
preempt_notifier_unregister(&vcpu->preempt_notifier);
preempt_enable();
mutex_unlock(&vcpu->mutex);
}
static void ack_flush(void *_completed)
{
}
static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
int i, cpu, me;
cpumask_var_t cpus;
bool called = true;
struct kvm_vcpu *vcpu;
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
me = get_cpu();
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_make_request(req, vcpu);
cpu = vcpu->cpu;
/* Set ->requests bit before we read ->mode */
smp_mb();
if (cpus != NULL && cpu != -1 && cpu != me &&
kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
cpumask_set_cpu(cpu, cpus);
}
if (unlikely(cpus == NULL))
smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
else if (!cpumask_empty(cpus))
smp_call_function_many(cpus, ack_flush, NULL, 1);
else
called = false;
put_cpu();
free_cpumask_var(cpus);
return called;
}
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
long dirty_count = kvm->tlbs_dirty;
smp_mb();
if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
++kvm->stat.remote_tlb_flush;
cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}
void kvm_reload_remote_mmus(struct kvm *kvm)
{
make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
}
int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
struct page *page;
int r;
mutex_init(&vcpu->mutex);
vcpu->cpu = -1;
vcpu->kvm = kvm;
vcpu->vcpu_id = id;
vcpu->pid = NULL;
init_waitqueue_head(&vcpu->wq);
kvm_async_pf_vcpu_init(vcpu);
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->run = page_address(page);
kvm_vcpu_set_in_spin_loop(vcpu, false);
kvm_vcpu_set_dy_eligible(vcpu, false);
r = kvm_arch_vcpu_init(vcpu);
if (r < 0)
goto fail_free_run;
return 0;
fail_free_run:
free_page((unsigned long)vcpu->run);
fail:
return r;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_init);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
{
put_pid(vcpu->pid);
kvm_arch_vcpu_uninit(vcpu);
free_page((unsigned long)vcpu->run);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
return container_of(mn, struct kvm, mmu_notifier);
}
static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush, idx;
/*
* When ->invalidate_page runs, the linux pte has been zapped
* already but the page is still allocated until
* ->invalidate_page returns. So if we increase the sequence
* here the kvm page fault will notice if the spte can't be
* established because the page is going to be freed. If
* instead the kvm page fault establishes the spte before
* ->invalidate_page runs, kvm_unmap_hva will release it
* before returning.
*
* The sequence increase only needs to be seen at spin_unlock
* time, and not at spin_lock time.
*
* Increasing the sequence after the spin_unlock would be
* unsafe because the kvm page fault could then establish the
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
pte_t pte)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
kvm_set_spte_hva(kvm, address, pte);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
need_tlb_flush |= kvm->tlbs_dirty;
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
spin_lock(&kvm->mmu_lock);
/*
* This sequence increase will notify the kvm page fault that
* the page that is going to be mapped in the spte could have
* been freed.
*/
kvm->mmu_notifier_seq++;
smp_wmb();
/*
* The above sequence increase must be visible before the
* below count decrease, which is ensured by the smp_wmb above
* in conjunction with the smp_rmb in mmu_notifier_retry().
*/
kvm->mmu_notifier_count--;
spin_unlock(&kvm->mmu_lock);
BUG_ON(kvm->mmu_notifier_count < 0);
}
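/*
 * For reference, the page fault side that these counters pair with
 * looks roughly like the following (a sketch; the exact helper name
 * and signature vary by kernel version):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);	// may sleep, mmu_lock not held
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	// raced with an invalidation
 *	// safe to install the spte here
 *
 * mmu_notifier_retry() fails the fault if mmu_notifier_count is
 * non-zero (an invalidation is in progress) or if mmu_notifier_seq has
 * changed since it was sampled.
 */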
static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int young, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
if (young)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return young;
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int young, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_test_age_hva(kvm, address);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return young;
}
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
struct mm_struct *mm)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
idx = srcu_read_lock(&kvm->srcu);
kvm_arch_flush_shadow_all(kvm);
srcu_read_unlock(&kvm->srcu, idx);
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
.invalidate_page = kvm_mmu_notifier_invalidate_page,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
.test_young = kvm_mmu_notifier_test_young,
.change_pte = kvm_mmu_notifier_change_pte,
.release = kvm_mmu_notifier_release,
};
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}
#else /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */
static int kvm_init_mmu_notifier(struct kvm *kvm)
{
return 0;
}
#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
static void kvm_init_memslots_id(struct kvm *kvm)
{
int i;
struct kvm_memslots *slots = kvm->memslots;
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
slots->id_to_index[i] = slots->memslots[i].id = i;
}
static struct kvm *kvm_create_vm(unsigned long type)
{
int r, i;
struct kvm *kvm = kvm_arch_alloc_vm();
if (!kvm)
return ERR_PTR(-ENOMEM);
r = kvm_arch_init_vm(kvm, type);
if (r)
goto out_err_nodisable;
r = hardware_enable_all();
if (r)
goto out_err_nodisable;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
INIT_HLIST_HEAD(&kvm->mask_notifier_list);
INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif
r = -ENOMEM;
kvm->memslots = kzalloc(sizeof(struct kvm_memslots), GFP_KERNEL);
if (!kvm->memslots)
goto out_err_nosrcu;
kvm_init_memslots_id(kvm);
if (init_srcu_struct(&kvm->srcu))
goto out_err_nosrcu;
for (i = 0; i < KVM_NR_BUSES; i++) {
kvm->buses[i] = kzalloc(sizeof(struct kvm_io_bus),
GFP_KERNEL);
if (!kvm->buses[i])
goto out_err;
}
spin_lock_init(&kvm->mmu_lock);
kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
r = kvm_init_mmu_notifier(kvm);
if (r)
goto out_err;
raw_spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
raw_spin_unlock(&kvm_lock);
return kvm;
out_err:
cleanup_srcu_struct(&kvm->srcu);
out_err_nosrcu:
hardware_disable_all();
out_err_nodisable:
for (i = 0; i < KVM_NR_BUSES; i++)
kfree(kvm->buses[i]);
kfree(kvm->memslots);
kvm_arch_free_vm(kvm);
return ERR_PTR(r);
}
/*
* Avoid using vmalloc for a small buffer.
* Should not be used when the size is statically known.
*/
void *kvm_kvzalloc(unsigned long size)
{
if (size > PAGE_SIZE)
return vzalloc(size);
else
return kzalloc(size, GFP_KERNEL);
}
void kvm_kvfree(const void *addr)
{
if (is_vmalloc_addr(addr))
vfree(addr);
else
kfree(addr);
}
static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
if (!memslot->dirty_bitmap)
return;
kvm_kvfree(memslot->dirty_bitmap);
memslot->dirty_bitmap = NULL;
}
/*
* Free any memory in @free but not in @dont.
*/
static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
kvm_destroy_dirty_bitmap(free);
kvm_arch_free_memslot(free, dont);
free->npages = 0;
}
void kvm_free_physmem(struct kvm *kvm)
{
struct kvm_memslots *slots = kvm->memslots;
struct kvm_memory_slot *memslot;
kvm_for_each_memslot(memslot, slots)
kvm_free_physmem_slot(memslot, NULL);
kfree(kvm->memslots);
}
static void kvm_destroy_vm(struct kvm *kvm)
{
int i;
struct mm_struct *mm = kvm->mm;
kvm_arch_sync_events(kvm);
raw_spin_lock(&kvm_lock);
list_del(&kvm->vm_list);
raw_spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
for (i = 0; i < KVM_NR_BUSES; i++)
kvm_io_bus_destroy(kvm->buses[i]);
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
#else
kvm_arch_flush_shadow_all(kvm);
#endif
kvm_arch_destroy_vm(kvm);
kvm_free_physmem(kvm);
cleanup_srcu_struct(&kvm->srcu);
kvm_arch_free_vm(kvm);
hardware_disable_all();
mmdrop(mm);
}
void kvm_get_kvm(struct kvm *kvm)
{
atomic_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);
void kvm_put_kvm(struct kvm *kvm)
{
if (atomic_dec_and_test(&kvm->users_count))
kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);
static int kvm_vm_release(struct inode *inode, struct file *filp)
{
struct kvm *kvm = filp->private_data;
kvm_irqfd_release(kvm);
kvm_put_kvm(kvm);
return 0;
}
/*
* Allocation size is twice as large as the actual dirty bitmap size.
* See x86's kvm_vm_ioctl_get_dirty_log() for why this is needed.
*/
static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
{
#ifndef CONFIG_S390
unsigned long dirty_bytes = 2 * kvm_dirty_bitmap_bytes(memslot);
memslot->dirty_bitmap = kvm_kvzalloc(dirty_bytes);
if (!memslot->dirty_bitmap)
return -ENOMEM;
#endif /* !CONFIG_S390 */
return 0;
}
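/*
 * For reference (a sketch, from memory, of the x86 scheme the comment
 * above points at): the second half of the allocation is used as a
 * temporary buffer, so the dirty bits can be fetched and cleared
 * atomically under mmu_lock and then copied to userspace without
 * holding the lock or allocating extra memory:
 *
 *	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
 *	// under mmu_lock: mask = xchg(&dirty_bitmap[i], 0), stash the
 *	// mask in dirty_bitmap_buffer, write-protect the pages
 *	// after unlock: copy_to_user(log->dirty_bitmap,
 *	//			      dirty_bitmap_buffer, n)
 */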
static int cmp_memslot(const void *slot1, const void *slot2)
{
struct kvm_memory_slot *s1, *s2;
s1 = (struct kvm_memory_slot *)slot1;
s2 = (struct kvm_memory_slot *)slot2;
if (s1->npages < s2->npages)
return 1;
if (s1->npages > s2->npages)
return -1;
return 0;
}
/*
* Sort the memslots based on size, so that the larger slots
* will get a better fit.
*/
static void sort_memslots(struct kvm_memslots *slots)
{
int i;
sort(slots->memslots, KVM_MEM_SLOTS_NUM,
sizeof(struct kvm_memory_slot), cmp_memslot, NULL);
for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
slots->id_to_index[slots->memslots[i].id] = i;
}
void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new)
{
if (new) {
int id = new->id;
struct kvm_memory_slot *old = id_to_memslot(slots, id);
unsigned long npages = old->npages;
*old = *new;
if (new->npages != npages)
sort_memslots(slots);
}
slots->generation++;
}
static int check_memory_region_flags(struct kvm_userspace_memory_region *mem)
{
u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;
#ifdef KVM_CAP_READONLY_MEM
valid_flags |= KVM_MEM_READONLY;
#endif
if (mem->flags & ~valid_flags)
return -EINVAL;
return 0;
}
/*
* Allocate some memory and give it an address in the guest physical address
* space.
*
* Discontiguous memory is allowed, mostly for framebuffers.
*
* Must be called holding kvm->slots_lock for write.
*/
int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = check_memory_region_flags(mem);
if (r)
goto out;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
if (npages && !old.npages) {
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
if (kvm_arch_create_memslot(&new, npages))
goto out_free;
}
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow_memslot(kvm, slot);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.dirty_bitmap = NULL;
memset(&new.arch, 0, sizeof(new.arch));
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow_all(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
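/*
 * Reader side of the memslot update scheme used above (a sketch):
 * lookups run inside an srcu read-side critical section and see either
 * the old or the new slots array, never a partially updated one.
 * synchronize_srcu_expedited() above guarantees all such readers have
 * finished before the old array is freed:
 *
 *	idx = srcu_read_lock(&kvm->srcu);
 *	memslot = gfn_to_memslot(kvm, gfn); // dereferences kvm->memslots
 *	// ... use memslot ...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */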
int kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
struct
kvm_userspace_memory_region *mem,
int user_alloc)
{
if (mem->slot >= KVM_MEMORY_SLOTS)
return -EINVAL;
return kvm_set_memory_region(kvm, mem, user_alloc);
}
int kvm_get_dirty_log(struct kvm *kvm,
struct kvm_dirty_log *log, int *is_dirty)
{
struct kvm_memory_slot *memslot;
int r, i;
unsigned long n;
unsigned long any = 0;
r = -EINVAL;
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
for (i = 0; !any && i < n/sizeof(long); ++i)
any = memslot->dirty_bitmap[i];
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
goto out;
if (any)
*is_dirty = 1;
r = 0;
out:
return r;
}
bool kvm_largepages_enabled(void)
{
return largepages_enabled;
}
void kvm_disable_largepages(void)
{
largepages_enabled = false;
}
EXPORT_SYMBOL_GPL(kvm_disable_largepages);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_memslot(kvm_memslots(kvm), gfn);
}
EXPORT_SYMBOL_GPL(gfn_to_memslot);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
if (!memslot || memslot->id >= KVM_MEMORY_SLOTS ||
memslot->flags & KVM_MEMSLOT_INVALID)
return 0;
return 1;
}
EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
{
struct vm_area_struct *vma;
unsigned long addr, size;
size = PAGE_SIZE;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return PAGE_SIZE;
down_read(&current->mm->mmap_sem);
vma = find_vma(current->mm, addr);
if (!vma)
goto out;
size = vma_kernel_pagesize(vma);
out:
up_read(&current->mm->mmap_sem);
return size;
}
static bool memslot_is_readonly(struct kvm_memory_slot *slot)
{
return slot->flags & KVM_MEM_READONLY;
}
static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages, bool write)
{
if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
return KVM_HVA_ERR_BAD;
if (memslot_is_readonly(slot) && write)
return KVM_HVA_ERR_RO_BAD;
if (nr_pages)
*nr_pages = slot->npages - (gfn - slot->base_gfn);
return __gfn_to_hva_memslot(slot, gfn);
}
static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
gfn_t *nr_pages)
{
return __gfn_to_hva_many(slot, gfn, nr_pages, true);
}
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
gfn_t gfn)
{
return gfn_to_hva_many(slot, gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
{
return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_hva);
/*
* The hva returned by this function is only allowed to be read.
* It should pair with kvm_read_hva() or kvm_read_hva_atomic().
*/
static unsigned long gfn_to_hva_read(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL, false);
}
static int kvm_read_hva(void *data, void __user *hva, int len)
{
return __copy_from_user(data, hva, len);
}
static int kvm_read_hva_atomic(void *data, void __user *hva, int len)
{
return __copy_from_user_inatomic(data, hva, len);
}
int get_user_page_nowait(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int write, struct page **page)
{
int flags = FOLL_TOUCH | FOLL_NOWAIT | FOLL_HWPOISON | FOLL_GET;
if (write)
flags |= FOLL_WRITE;
return __get_user_pages(tsk, mm, start, 1, flags, page, NULL, NULL);
}
static inline int check_user_page_hwpoison(unsigned long addr)
{
int rc, flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
rc = __get_user_pages(current, current->mm, addr, 1,
flags, NULL, NULL, NULL);
return rc == -EHWPOISON;
}
/*
* The atomic path to get the writable pfn which will be stored in @pfn,
* true indicates success, otherwise false is returned.
*/
static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
bool write_fault, bool *writable, pfn_t *pfn)
{
struct page *page[1];
int npages;
if (!(async || atomic))
return false;
/*
* Fast pin a writable pfn only if it is a write fault request
* or the caller allows mapping a writable pfn for a read fault
* request.
*/
if (!(write_fault || writable))
return false;
npages = __get_user_pages_fast(addr, 1, 1, page);
if (npages == 1) {
*pfn = page_to_pfn(page[0]);
if (writable)
*writable = true;
return true;
}
return false;
}
/*
* The slow path to get the pfn of the specified host virtual address:
* 1 indicates success; -errno is returned if an error is detected.
*/
static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
bool *writable, pfn_t *pfn)
{
struct page *page[1];
int npages = 0;
might_sleep();
if (writable)
*writable = write_fault;
if (async) {
down_read(&current->mm->mmap_sem);
npages = get_user_page_nowait(current, current->mm,
addr, write_fault, page);
up_read(&current->mm->mmap_sem);
} else
npages = get_user_pages_fast(addr, 1, write_fault,
page);
if (npages != 1)
return npages;
/* map read fault as writable if possible */
if (unlikely(!write_fault) && writable) {
struct page *wpage[1];
npages = __get_user_pages_fast(addr, 1, 1, wpage);
if (npages == 1) {
*writable = true;
put_page(page[0]);
page[0] = wpage[0];
}
npages = 1;
}
*pfn = page_to_pfn(page[0]);
return npages;
}
static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
{
if (unlikely(!(vma->vm_flags & VM_READ)))
return false;
if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
return false;
return true;
}
/*
* Pin guest page in memory and return its pfn.
* @addr: host virtual address which maps memory to the guest
* @atomic: whether we are in atomic context and must not sleep
* @async: whether this function needs to wait for IO to complete if the
* host page is not in memory
* @write_fault: whether we should get a writable host page
* @writable: whether a writable host page may be mapped for !@write_fault
*
* The function will map a writable host page for these two cases:
* 1): @write_fault = true
* 2): @write_fault = false && @writable, @writable will tell the caller
* whether the mapping is writable.
*/
static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
bool write_fault, bool *writable)
{
struct vm_area_struct *vma;
pfn_t pfn = 0;
int npages;
/* we can do it either atomically or asynchronously, not both */
BUG_ON(atomic && async);
if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
return pfn;
if (atomic)
return KVM_PFN_ERR_FAULT;
npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
if (npages == 1)
return pfn;
down_read(&current->mm->mmap_sem);
if (npages == -EHWPOISON ||
(!async && check_user_page_hwpoison(addr))) {
pfn = KVM_PFN_ERR_HWPOISON;
goto exit;
}
vma = find_vma_intersection(current->mm, addr, addr + 1);
if (vma == NULL)
pfn = KVM_PFN_ERR_FAULT;
else if ((vma->vm_flags & VM_PFNMAP)) {
pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
vma->vm_pgoff;
BUG_ON(!kvm_is_mmio_pfn(pfn));
} else {
if (async && vma_is_valid(vma, write_fault))
*async = true;
pfn = KVM_PFN_ERR_FAULT;
}
exit:
up_read(&current->mm->mmap_sem);
return pfn;
}
static pfn_t
__gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
bool *async, bool write_fault, bool *writable)
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
if (addr == KVM_HVA_ERR_RO_BAD)
return KVM_PFN_ERR_RO_FAULT;
if (kvm_is_error_hva(addr))
return KVM_PFN_ERR_BAD;
/* Do not map writable pfn in the readonly memslot. */
if (writable && memslot_is_readonly(slot)) {
*writable = false;
writable = NULL;
}
return hva_to_pfn(addr, atomic, async, write_fault,
writable);
}
static pfn_t __gfn_to_pfn(struct kvm *kvm, gfn_t gfn, bool atomic, bool *async,
bool write_fault, bool *writable)
{
struct kvm_memory_slot *slot;
if (async)
*async = false;
slot = gfn_to_memslot(kvm, gfn);
return __gfn_to_pfn_memslot(slot, gfn, atomic, async, write_fault,
writable);
}
pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_pfn(kvm, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_atomic);
pfn_t gfn_to_pfn_async(struct kvm *kvm, gfn_t gfn, bool *async,
bool write_fault, bool *writable)
{
return __gfn_to_pfn(kvm, gfn, false, async, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_async);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
{
return __gfn_to_pfn(kvm, gfn, false, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
bool *writable)
{
return __gfn_to_pfn(kvm, gfn, false, NULL, write_fault, writable);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL);
}
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn)
{
return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL);
}
EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages,
int nr_pages)
{
unsigned long addr;
gfn_t entry;
addr = gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, &entry);
if (kvm_is_error_hva(addr))
return -1;
if (entry < nr_pages)
return 0;
return __get_user_pages_fast(addr, nr_pages, 1, pages);
}
EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
static struct page *kvm_pfn_to_page(pfn_t pfn)
{
if (is_error_pfn(pfn))
return KVM_ERR_PTR_BAD_PAGE;
if (kvm_is_mmio_pfn(pfn)) {
WARN_ON(1);
return KVM_ERR_PTR_BAD_PAGE;
}
return pfn_to_page(pfn);
}
struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
pfn_t pfn;
pfn = gfn_to_pfn(kvm, gfn);
return kvm_pfn_to_page(pfn);
}
EXPORT_SYMBOL_GPL(gfn_to_page);
void kvm_release_page_clean(struct page *page)
{
WARN_ON(is_error_page(page));
kvm_release_pfn_clean(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_clean);
void kvm_release_pfn_clean(pfn_t pfn)
{
WARN_ON(is_error_pfn(pfn));
if (!kvm_is_mmio_pfn(pfn))
put_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
void kvm_release_page_dirty(struct page *page)
{
WARN_ON(is_error_page(page));
kvm_release_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
void kvm_release_pfn_dirty(pfn_t pfn)
{
kvm_set_pfn_dirty(pfn);
kvm_release_pfn_clean(pfn);
}
EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
void kvm_set_page_dirty(struct page *page)
{
kvm_set_pfn_dirty(page_to_pfn(page));
}
EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
void kvm_set_pfn_dirty(pfn_t pfn)
{
if (!kvm_is_mmio_pfn(pfn)) {
struct page *page = pfn_to_page(pfn);
if (!PageReserved(page))
SetPageDirty(page);
}
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
void kvm_set_pfn_accessed(pfn_t pfn)
{
if (!kvm_is_mmio_pfn(pfn))
mark_page_accessed(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
void kvm_get_pfn(pfn_t pfn)
{
if (!kvm_is_mmio_pfn(pfn))
get_page(pfn_to_page(pfn));
}
EXPORT_SYMBOL_GPL(kvm_get_pfn);
static int next_segment(unsigned long len, int offset)
{
if (len > PAGE_SIZE - offset)
return PAGE_SIZE - offset;
else
return len;
}
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva_read(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = kvm_read_hva(data, (void __user *)addr + offset, len);
if (r)
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len)
{
int r;
unsigned long addr;
gfn_t gfn = gpa >> PAGE_SHIFT;
int offset = offset_in_page(gpa);
addr = gfn_to_hva_read(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
pagefault_disable();
r = kvm_read_hva_atomic(data, (void __user *)addr + offset, len);
pagefault_enable();
if (r)
return -EFAULT;
return 0;
}
EXPORT_SYMBOL(kvm_read_guest_atomic);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len)
{
int r;
unsigned long addr;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return -EFAULT;
r = __copy_to_user((void __user *)addr + offset, data, len);
if (r)
return -EFAULT;
mark_page_dirty(kvm, gfn);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
data += seg;
++gfn;
}
return 0;
}
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int offset = offset_in_page(gpa);
gfn_t gfn = gpa >> PAGE_SHIFT;
ghc->gpa = gpa;
ghc->generation = slots->generation;
ghc->memslot = gfn_to_memslot(kvm, gfn);
ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL);
if (!kvm_is_error_hva(ghc->hva))
ghc->hva += offset;
else
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
if (slots->generation != ghc->generation)
kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
r = __copy_to_user((void __user *)ghc->hva, data, len);
if (r)
return -EFAULT;
mark_page_dirty_in_slot(kvm, ghc->memslot, ghc->gpa >> PAGE_SHIFT);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
if (slots->generation != ghc->generation)
kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa);
if (kvm_is_error_hva(ghc->hva))
return -EFAULT;
r = __copy_from_user(data, (void __user *)ghc->hva, len);
if (r)
return -EFAULT;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
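/*
 * Typical usage of the gfn_to_hva cache (a sketch; the surrounding
 * context is hypothetical): initialize once for a fixed gpa, then
 * reuse for repeated guest accesses.  The cache revalidates itself
 * when the memslot generation changes, as seen above:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 1;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa))
 *		return -EFAULT;
 *	// fast path: no memslot lookup unless the generation changed
 *	kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val));
 */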
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
{
return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
offset, len);
}
EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
int seg;
int offset = offset_in_page(gpa);
int ret;
while ((seg = next_segment(len, offset)) != 0) {
ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
if (ret < 0)
return ret;
offset = 0;
len -= seg;
++gfn;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_clear_guest);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
gfn_t gfn)
{
if (memslot && memslot->dirty_bitmap) {
unsigned long rel_gfn = gfn - memslot->base_gfn;
/* TODO: introduce set_bit_le() and use it */
test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
}
}
void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
{
struct kvm_memory_slot *memslot;
memslot = gfn_to_memslot(kvm, gfn);
mark_page_dirty_in_slot(kvm, memslot, gfn);
}
/*
* The vCPU has executed a HLT instruction with in-kernel mode enabled.
*/
void kvm_vcpu_block(struct kvm_vcpu *vcpu)
{
DEFINE_WAIT(wait);
for (;;) {
prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
if (kvm_arch_vcpu_runnable(vcpu)) {
kvm_make_request(KVM_REQ_UNHALT, vcpu);
break;
}
if (kvm_cpu_has_pending_timer(vcpu))
break;
if (signal_pending(current))
break;
schedule();
}
finish_wait(&vcpu->wq, &wait);
}
#ifndef CONFIG_S390
/*
* Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
*/
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
int me;
int cpu = vcpu->cpu;
wait_queue_head_t *wqp;
wqp = kvm_arch_vcpu_wq(vcpu);
if (waitqueue_active(wqp)) {
wake_up_interruptible(wqp);
++vcpu->stat.halt_wakeup;
}
me = get_cpu();
if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
if (kvm_arch_vcpu_should_kick(vcpu))
smp_send_reschedule(cpu);
put_cpu();
}
#endif /* !CONFIG_S390 */
void kvm_resched(struct kvm_vcpu *vcpu)
{
if (!need_resched())
return;
cond_resched();
}
EXPORT_SYMBOL_GPL(kvm_resched);
bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
{
struct pid *pid;
struct task_struct *task = NULL;
rcu_read_lock();
pid = rcu_dereference(target->pid);
if (pid)
task = get_pid_task(pid, PIDTYPE_PID);
rcu_read_unlock();
if (!task)
return false;
if (task->flags & PF_VCPU) {
put_task_struct(task);
return false;
}
if (yield_to(task, 1)) {
put_task_struct(task);
return true;
}
put_task_struct(task);
return false;
}
EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
/*
* Helper that checks whether a VCPU is eligible for directed yield.
* The most eligible candidate to yield to is decided by the following
* heuristics:
*
* (a) A VCPU which has not recently done a PL exit or had cpu relax
* intercepted (a preempted lock holder), indicated by @in_spin_loop.
* Set at the beginning and cleared at the end of the interception/PLE
* handler.
*
* (b) A VCPU which has done a PL exit or had cpu relax intercepted but
* did not get a chance last time (it has most likely become eligible
* now, since we probably yielded to the lock holder in the last
* iteration).  This is done by toggling @dy_eligible each time a VCPU
* is checked for eligibility.
*
* Yielding to a recently PL-exited/cpu-relax-intercepted VCPU before
* yielding to a preempted lock holder could result in wrong VCPU
* selection and CPU burning.  Giving priority to a potential lock
* holder increases lock progress.
*
* Since the algorithm is based on heuristics, accessing another VCPU's
* data without locking does no harm.  It may result in trying to yield
* to the same VCPU, failing, and continuing with the next VCPU, and so
* on.
*/
bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
{
bool eligible;
eligible = !vcpu->spin_loop.in_spin_loop ||
(vcpu->spin_loop.in_spin_loop &&
vcpu->spin_loop.dy_eligible);
if (vcpu->spin_loop.in_spin_loop)
kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
return eligible;
}
#endif
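/*
 * Worked example of the toggling above (hypothetical sequence): a VCPU
 * in a spin loop with dy_eligible == false is skipped on the first
 * directed-yield pass, but the check flips dy_eligible to true, so it
 * becomes a candidate on the next pass; once checked again, it flips
 * back to false.  The net effect is that a PLE-exiting VCPU is
 * considered at most every other time it is examined.
 */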
void kvm_vcpu_on_spin(struct kvm_vcpu *me)
{
struct kvm *kvm = me->kvm;
struct kvm_vcpu *vcpu;
int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
int yielded = 0;
int pass;
int i;
kvm_vcpu_set_in_spin_loop(me, true);
/*
* We boost the priority of a VCPU that is runnable but not
* currently running, because it got preempted by something
* else and called schedule in __vcpu_run. Hopefully that
* VCPU is holding the lock that we need and will release it.
* We approximate round-robin by starting at the last boosted VCPU.
*/
for (pass = 0; pass < 2 && !yielded; pass++) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!pass && i <= last_boosted_vcpu) {
i = last_boosted_vcpu;
continue;
} else if (pass && i > last_boosted_vcpu)
break;
if (vcpu == me)
continue;
if (waitqueue_active(&vcpu->wq))
continue;
if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
continue;
if (kvm_vcpu_yield_to(vcpu)) {
kvm->last_boosted_vcpu = i;
yielded = 1;
break;
}
}
}
kvm_vcpu_set_in_spin_loop(me, false);
/* Ensure vcpu is not eligible during next spinloop */
kvm_vcpu_set_dy_eligible(me, false);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
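/*
* Back the vcpu file's mmap region: page 0 maps the kvm_run structure,
* followed on x86 by the PIO data page and, where configured, by the
* coalesced MMIO ring page. Any other offset is handed to the
* architecture code.
*/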
static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct kvm_vcpu *vcpu = vma->vm_file->private_data;
struct page *page;
if (vmf->pgoff == 0)
page = virt_to_page(vcpu->run);
#ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
else
return kvm_arch_vcpu_fault(vcpu, vmf);
get_page(page);
vmf->page = page;
return 0;
}
static const struct vm_operations_struct kvm_vcpu_vm_ops = {
.fault = kvm_vcpu_fault,
};
static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
{
vma->vm_ops = &kvm_vcpu_vm_ops;
return 0;
}
static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
kvm_put_kvm(vcpu->kvm);
return 0;
}
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = kvm_vcpu_compat_ioctl,
#endif
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
};
/*
* Allocates an inode for the vcpu.
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
}
/*
* Creates some virtual cpus. Good luck creating more than one.
*/
static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
{
int r;
struct kvm_vcpu *vcpu, *v;
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);
preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
r = kvm_arch_vcpu_setup(vcpu);
if (r)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
if (!kvm_vcpu_compatible(vcpu)) {
r = -EINVAL;
goto unlock_vcpu_destroy;
}
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy;
}
kvm_for_each_vcpu(r, v, kvm)
if (v->vcpu_id == id) {
r = -EEXIST;
goto unlock_vcpu_destroy;
}
BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]);
/* Now it's all set up, let userspace reach it */
kvm_get_kvm(kvm);
r = create_vcpu_fd(vcpu);
if (r < 0) {
kvm_put_kvm(kvm);
goto unlock_vcpu_destroy;
}
kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu;
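/*
* Make the vcpus[] store visible before online_vcpus is bumped,
* pairing with the read barrier in kvm_get_vcpu().
*/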
smp_wmb();
atomic_inc(&kvm->online_vcpus);
mutex_unlock(&kvm->lock);
return r;
unlock_vcpu_destroy:
mutex_unlock(&kvm->lock);
vcpu_destroy:
kvm_arch_vcpu_destroy(vcpu);
return r;
}
static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
{
if (sigset) {
sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
vcpu->sigset_active = 1;
vcpu->sigset = *sigset;
} else
vcpu->sigset_active = 0;
return 0;
}
static long kvm_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
struct kvm_fpu *fpu = NULL;
struct kvm_sregs *kvm_sregs = NULL;
if (vcpu->kvm->mm != current->mm)
return -EIO;
#if defined(CONFIG_S390) || defined(CONFIG_PPC)
/*
* Special cases: vcpu ioctls that are asynchronous to vcpu execution,
* so vcpu_load() would break them.
*/
if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
#endif
vcpu_load(vcpu);
switch (ioctl) {
case KVM_RUN:
r = -EINVAL;
if (arg)
goto out;
r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
break;
case KVM_GET_REGS: {
struct kvm_regs *kvm_regs;
r = -ENOMEM;
kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
if (!kvm_regs)
goto out;
r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
if (r)
goto out_free1;
r = -EFAULT;
if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
goto out_free1;
r = 0;
out_free1:
kfree(kvm_regs);
break;
}
case KVM_SET_REGS: {
struct kvm_regs *kvm_regs;
r = -ENOMEM;
kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
if (IS_ERR(kvm_regs)) {
r = PTR_ERR(kvm_regs);
goto out;
}
r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
if (r)
goto out_free2;
r = 0;
out_free2:
kfree(kvm_regs);
break;
}
case KVM_GET_SREGS: {
kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
r = -ENOMEM;
if (!kvm_sregs)
goto out;
r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
goto out;
r = 0;
break;
}
case KVM_SET_SREGS: {
kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
if (IS_ERR(kvm_sregs)) {
r = PTR_ERR(kvm_sregs);
goto out;
}
r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
if (r)
goto out;
r = 0;
break;
}
case KVM_GET_MP_STATE: {
struct kvm_mp_state mp_state;
r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &mp_state, sizeof mp_state))
goto out;
r = 0;
break;
}
case KVM_SET_MP_STATE: {
struct kvm_mp_state mp_state;
r = -EFAULT;
if (copy_from_user(&mp_state, argp, sizeof mp_state))
goto out;
r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
if (r)
goto out;
r = 0;
break;
}
case KVM_TRANSLATE: {
struct kvm_translation tr;
r = -EFAULT;
if (copy_from_user(&tr, argp, sizeof tr))
goto out;
r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &tr, sizeof tr))
goto out;
r = 0;
break;
}
case KVM_SET_GUEST_DEBUG: {
struct kvm_guest_debug dbg;
r = -EFAULT;
if (copy_from_user(&dbg, argp, sizeof dbg))
goto out;
r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
if (r)
goto out;
r = 0;
break;
}
case KVM_SET_SIGNAL_MASK: {
struct kvm_signal_mask __user *sigmask_arg = argp;
struct kvm_signal_mask kvm_sigmask;
sigset_t sigset, *p;
p = NULL;
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
sizeof kvm_sigmask))
goto out;
r = -EINVAL;
if (kvm_sigmask.len != sizeof sigset)
goto out;
r = -EFAULT;
if (copy_from_user(&sigset, sigmask_arg->sigset,
sizeof sigset))
goto out;
p = &sigset;
}
r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
break;
}
case KVM_GET_FPU: {
fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
r = -ENOMEM;
if (!fpu)
goto out;
r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
goto out;
r = 0;
break;
}
case KVM_SET_FPU: {
fpu = memdup_user(argp, sizeof(*fpu));
if (IS_ERR(fpu)) {
r = PTR_ERR(fpu);
goto out;
}
r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
if (r)
goto out;
r = 0;
break;
}
default:
r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
}
out:
vcpu_put(vcpu);
kfree(fpu);
kfree(kvm_sregs);
return r;
}
#ifdef CONFIG_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = compat_ptr(arg);
int r;
if (vcpu->kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
case KVM_SET_SIGNAL_MASK: {
struct kvm_signal_mask __user *sigmask_arg = argp;
struct kvm_signal_mask kvm_sigmask;
compat_sigset_t csigset;
sigset_t sigset;
if (argp) {
r = -EFAULT;
if (copy_from_user(&kvm_sigmask, argp,
sizeof kvm_sigmask))
goto out;
r = -EINVAL;
if (kvm_sigmask.len != sizeof csigset)
goto out;
r = -EFAULT;
if (copy_from_user(&csigset, sigmask_arg->sigset,
sizeof csigset))
goto out;
sigset_from_compat(&sigset, &csigset);
r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
} else
/* A NULL argp clears the mask; csigset is uninitialized here. */
r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
break;
}
default:
r = kvm_vcpu_ioctl(filp, ioctl, arg);
}
out:
return r;
}
#endif
static long kvm_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
if (kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
case KVM_CREATE_VCPU:
r = kvm_vm_ioctl_create_vcpu(kvm, arg);
if (r < 0)
goto out;
break;
case KVM_SET_USER_MEMORY_REGION: {
struct kvm_userspace_memory_region kvm_userspace_mem;
r = -EFAULT;
if (copy_from_user(&kvm_userspace_mem, argp,
sizeof kvm_userspace_mem))
goto out;
r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
if (r)
goto out;
break;
}
case KVM_GET_DIRTY_LOG: {
struct kvm_dirty_log log;
r = -EFAULT;
if (copy_from_user(&log, argp, sizeof log))
goto out;
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
if (r)
goto out;
break;
}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
#endif
case KVM_IRQFD: {
struct kvm_irqfd data;
r = -EFAULT;
if (copy_from_user(&data, argp, sizeof data))
goto out;
r = kvm_irqfd(kvm, &data);
break;
}
case KVM_IOEVENTFD: {
struct kvm_ioeventfd data;
r = -EFAULT;
if (copy_from_user(&data, argp, sizeof data))
goto out;
r = kvm_ioeventfd(kvm, &data);
break;
}
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
case KVM_SET_BOOT_CPU_ID:
r = 0;
mutex_lock(&kvm->lock);
if (atomic_read(&kvm->online_vcpus) != 0)
r = -EBUSY;
else
kvm->bsp_vcpu_id = arg;
mutex_unlock(&kvm->lock);
break;
#endif
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_SIGNAL_MSI: {
struct kvm_msi msi;
r = -EFAULT;
if (copy_from_user(&msi, argp, sizeof msi))
goto out;
r = kvm_send_userspace_msi(kvm, &msi);
break;
}
#endif
#ifdef __KVM_HAVE_IRQ_LINE
case KVM_IRQ_LINE_STATUS:
case KVM_IRQ_LINE: {
struct kvm_irq_level irq_event;
r = -EFAULT;
if (copy_from_user(&irq_event, argp, sizeof irq_event))
goto out;
r = kvm_vm_ioctl_irq_line(kvm, &irq_event);
if (r)
goto out;
r = -EFAULT;
if (ioctl == KVM_IRQ_LINE_STATUS) {
if (copy_to_user(argp, &irq_event, sizeof irq_event))
goto out;
}
r = 0;
break;
}
#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
if (r == -ENOTTY)
r = kvm_vm_ioctl_assigned_device(kvm, ioctl, arg);
}
out:
return r;
}
#ifdef CONFIG_COMPAT
struct compat_kvm_dirty_log {
__u32 slot;
__u32 padding1;
union {
compat_uptr_t dirty_bitmap; /* one bit per page */
__u64 padding2;
};
};
static long kvm_vm_compat_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
int r;
if (kvm->mm != current->mm)
return -EIO;
switch (ioctl) {
case KVM_GET_DIRTY_LOG: {
struct compat_kvm_dirty_log compat_log;
struct kvm_dirty_log log;
r = -EFAULT;
if (copy_from_user(&compat_log, (void __user *)arg,
sizeof(compat_log)))
goto out;
log.slot = compat_log.slot;
log.padding1 = compat_log.padding1;
log.padding2 = compat_log.padding2;
log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
if (r)
goto out;
break;
}
default:
r = kvm_vm_ioctl(filp, ioctl, arg);
}
out:
return r;
}
#endif
static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page[1];
unsigned long addr;
int npages;
gfn_t gfn = vmf->pgoff;
struct kvm *kvm = vma->vm_file->private_data;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return VM_FAULT_SIGBUS;
npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
NULL);
if (unlikely(npages != 1))
return VM_FAULT_SIGBUS;
vmf->page = page[0];
return 0;
}
static const struct vm_operations_struct kvm_vm_vm_ops = {
.fault = kvm_vm_fault,
};
static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
{
vma->vm_ops = &kvm_vm_vm_ops;
return 0;
}
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = kvm_vm_compat_ioctl,
#endif
.mmap = kvm_vm_mmap,
.llseek = noop_llseek,
};
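/*
* KVM_CREATE_VM returns a new VM file descriptor. A minimal userspace
* sequence (a sketch only; error handling omitted) looks like:
*
*	int kvm = open("/dev/kvm", O_RDWR);
*	int vm = ioctl(kvm, KVM_CREATE_VM, 0);
*	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
*
* Both returned descriptors are anonymous inodes served by the fops
* defined in this file.
*/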
static int kvm_dev_ioctl_create_vm(unsigned long type)
{
int r;
struct kvm *kvm;
kvm = kvm_create_vm(type);
if (IS_ERR(kvm))
return PTR_ERR(kvm);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
r = kvm_coalesced_mmio_init(kvm);
if (r < 0) {
kvm_put_kvm(kvm);
return r;
}
#endif
r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
if (r < 0)
kvm_put_kvm(kvm);
return r;
}
static long kvm_dev_ioctl_check_extension_generic(long arg)
{
switch (arg) {
case KVM_CAP_USER_MEMORY:
case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
#ifdef CONFIG_KVM_APIC_ARCHITECTURE
case KVM_CAP_SET_BOOT_CPU_ID:
#endif
case KVM_CAP_INTERNAL_ERROR_DATA:
#ifdef CONFIG_HAVE_KVM_MSI
case KVM_CAP_SIGNAL_MSI:
#endif
return 1;
#ifdef KVM_CAP_IRQ_ROUTING
case KVM_CAP_IRQ_ROUTING:
return KVM_MAX_IRQ_ROUTES;
#endif
default:
break;
}
return kvm_dev_ioctl_check_extension(arg);
}
static long kvm_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
long r = -EINVAL;
switch (ioctl) {
case KVM_GET_API_VERSION:
r = -EINVAL;
if (arg)
goto out;
r = KVM_API_VERSION;
break;
case KVM_CREATE_VM:
r = kvm_dev_ioctl_create_vm(arg);
break;
case KVM_CHECK_EXTENSION:
r = kvm_dev_ioctl_check_extension_generic(arg);
break;
case KVM_GET_VCPU_MMAP_SIZE:
r = -EINVAL;
if (arg)
goto out;
r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
case KVM_TRACE_ENABLE:
case KVM_TRACE_PAUSE:
case KVM_TRACE_DISABLE:
r = -EOPNOTSUPP;
break;
default:
return kvm_arch_dev_ioctl(filp, ioctl, arg);
}
out:
return r;
}
static struct file_operations kvm_chardev_ops = {
.unlocked_ioctl = kvm_dev_ioctl,
.compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
};
static struct miscdevice kvm_dev = {
KVM_MINOR,
"kvm",
&kvm_chardev_ops,
};
static void hardware_enable_nolock(void *junk)
{
int cpu = raw_smp_processor_id();
int r;
if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
cpumask_set_cpu(cpu, cpus_hardware_enabled);
r = kvm_arch_hardware_enable(NULL);
if (r) {
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
atomic_inc(&hardware_enable_failed);
printk(KERN_INFO "kvm: enabling virtualization on "
"CPU%d failed\n", cpu);
}
}
static void hardware_enable(void *junk)
{
raw_spin_lock(&kvm_lock);
hardware_enable_nolock(junk);
raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_nolock(void *junk)
{
int cpu = raw_smp_processor_id();
if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
return;
cpumask_clear_cpu(cpu, cpus_hardware_enabled);
kvm_arch_hardware_disable(NULL);
}
static void hardware_disable(void *junk)
{
raw_spin_lock(&kvm_lock);
hardware_disable_nolock(junk);
raw_spin_unlock(&kvm_lock);
}
static void hardware_disable_all_nolock(void)
{
BUG_ON(!kvm_usage_count);
kvm_usage_count--;
if (!kvm_usage_count)
on_each_cpu(hardware_disable_nolock, NULL, 1);
}
static void hardware_disable_all(void)
{
raw_spin_lock(&kvm_lock);
hardware_disable_all_nolock();
raw_spin_unlock(&kvm_lock);
}
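/*
* The first VM to come up enables hardware virtualization on every
* online CPU; later VMs only bump kvm_usage_count. Destroying the last
* VM disables it again via hardware_disable_all() above.
*/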
static int hardware_enable_all(void)
{
int r = 0;
raw_spin_lock(&kvm_lock);
kvm_usage_count++;
if (kvm_usage_count == 1) {
atomic_set(&hardware_enable_failed, 0);
on_each_cpu(hardware_enable_nolock, NULL, 1);
if (atomic_read(&hardware_enable_failed)) {
hardware_disable_all_nolock();
r = -EBUSY;
}
}
raw_spin_unlock(&kvm_lock);
return r;
}
static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
void *v)
{
int cpu = (long)v;
if (!kvm_usage_count)
return NOTIFY_OK;
val &= ~CPU_TASKS_FROZEN;
switch (val) {
case CPU_DYING:
printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
cpu);
hardware_disable(NULL);
break;
case CPU_STARTING:
printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
cpu);
hardware_enable(NULL);
break;
}
return NOTIFY_OK;
}
asmlinkage void kvm_spurious_fault(void)
{
/* Fault while not rebooting. We want the trace. */
BUG();
}
EXPORT_SYMBOL_GPL(kvm_spurious_fault);
static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
void *v)
{
/*
* Some (well, at least mine) BIOSes hang on reboot if
* in vmx root mode.
*
* And Intel TXT requires VMX to be off on all CPUs when the system
* shuts down.
*/
printk(KERN_INFO "kvm: exiting hardware virtualization\n");
kvm_rebooting = true;
on_each_cpu(hardware_disable_nolock, NULL, 1);
return NOTIFY_OK;
}
static struct notifier_block kvm_reboot_notifier = {
.notifier_call = kvm_reboot,
.priority = 0,
};
static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
{
int i;
for (i = 0; i < bus->dev_count; i++) {
struct kvm_io_device *pos = bus->range[i].dev;
kvm_iodevice_destructor(pos);
}
kfree(bus);
}
int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
{
const struct kvm_io_range *r1 = p1;
const struct kvm_io_range *r2 = p2;
if (r1->addr < r2->addr)
return -1;
if (r1->addr + r1->len > r2->addr + r2->len)
return 1;
return 0;
}
int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
gpa_t addr, int len)
{
bus->range[bus->dev_count++] = (struct kvm_io_range) {
.addr = addr,
.len = len,
.dev = dev,
};
sort(bus->range, bus->dev_count, sizeof(struct kvm_io_range),
kvm_io_bus_sort_cmp, NULL);
return 0;
}
int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
gpa_t addr, int len)
{
struct kvm_io_range *range, key;
int off;
key = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
range = bsearch(&key, bus->range, bus->dev_count,
sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
if (range == NULL)
return -ENOENT;
off = range - bus->range;
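/*
* bsearch() may land on any entry that compares equal; walk back to the
* first matching device so callers can iterate forward over every
* device covering this range.
*/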
while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
off--;
return off;
}
/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
int idx;
struct kvm_io_bus *bus;
struct kvm_io_range range;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
idx = kvm_io_bus_get_first_dev(bus, addr, len);
if (idx < 0)
return -EOPNOTSUPP;
while (idx < bus->dev_count &&
kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
return 0;
idx++;
}
return -EOPNOTSUPP;
}
/* kvm_io_bus_read - called under kvm->slots_lock */
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
int idx;
struct kvm_io_bus *bus;
struct kvm_io_range range;
range = (struct kvm_io_range) {
.addr = addr,
.len = len,
};
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
idx = kvm_io_bus_get_first_dev(bus, addr, len);
if (idx < 0)
return -EOPNOTSUPP;
while (idx < bus->dev_count &&
kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
return 0;
idx++;
}
return -EOPNOTSUPP;
}
/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev)
{
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
if (bus->dev_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
sizeof(struct kvm_io_range)));
kvm_io_bus_insert_dev(new_bus, dev, addr, len);
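/*
* Publish the new bus with a single pointer update, then wait for all
* SRCU readers of the old bus to drain before freeing it.
*/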
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
return 0;
}
/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
int i, r;
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
r = -ENOENT;
for (i = 0; i < bus->dev_count; i++)
if (bus->range[i].dev == dev) {
r = 0;
break;
}
if (r)
return r;
new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
if (!new_bus)
return -ENOMEM;
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
return r;
}
static struct notifier_block kvm_cpu_notifier = {
.notifier_call = kvm_cpu_hotplug,
};
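/*
* debugfs statistics: _offset encodes the byte offset of a u32 counter
* inside struct kvm (or, for vcpu_stat_get() below, struct kvm_vcpu);
* the getters sum that counter across all VMs or VCPUs.
*/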
static int vm_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
*val = 0;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
*val += *(u32 *)((void *)kvm + offset);
raw_spin_unlock(&kvm_lock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
static int vcpu_stat_get(void *_offset, u64 *val)
{
unsigned offset = (long)_offset;
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
*val = 0;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
*val += *(u32 *)((void *)vcpu + offset);
raw_spin_unlock(&kvm_lock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
static const struct file_operations *stat_fops[] = {
[KVM_STAT_VCPU] = &vcpu_stat_fops,
[KVM_STAT_VM] = &vm_stat_fops,
};
static int kvm_init_debug(void)
{
int r = -EFAULT;
struct kvm_stats_debugfs_item *p;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
if (kvm_debugfs_dir == NULL)
goto out;
for (p = debugfs_entries; p->name; ++p) {
p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
(void *)(long)p->offset,
stat_fops[p->kind]);
if (p->dentry == NULL)
goto out_dir;
}
return 0;
out_dir:
debugfs_remove_recursive(kvm_debugfs_dir);
out:
return r;
}
static void kvm_exit_debug(void)
{
struct kvm_stats_debugfs_item *p;
for (p = debugfs_entries; p->name; ++p)
debugfs_remove(p->dentry);
debugfs_remove(kvm_debugfs_dir);
}
static int kvm_suspend(void)
{
if (kvm_usage_count)
hardware_disable_nolock(NULL);
return 0;
}
static void kvm_resume(void)
{
if (kvm_usage_count) {
WARN_ON(raw_spin_is_locked(&kvm_lock));
hardware_enable_nolock(NULL);
}
}
static struct syscore_ops kvm_syscore_ops = {
.suspend = kvm_suspend,
.resume = kvm_resume,
};
static inline
struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
{
return container_of(pn, struct kvm_vcpu, preempt_notifier);
}
static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
kvm_arch_vcpu_load(vcpu, cpu);
}
static void kvm_sched_out(struct preempt_notifier *pn,
struct task_struct *next)
{
struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
kvm_arch_vcpu_put(vcpu);
}
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
struct module *module)
{
int r;
int cpu;
r = kvm_arch_init(opaque);
if (r)
goto out_fail;
if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
r = -ENOMEM;
goto out_free_0;
}
r = kvm_arch_hardware_setup();
if (r < 0)
goto out_free_0a;
for_each_online_cpu(cpu) {
smp_call_function_single(cpu,
kvm_arch_check_processor_compat,
&r, 1);
if (r < 0)
goto out_free_1;
}
r = register_cpu_notifier(&kvm_cpu_notifier);
if (r)
goto out_free_2;
register_reboot_notifier(&kvm_reboot_notifier);
/* A kmem cache lets us meet the alignment requirements of fx_save. */
if (!vcpu_align)
vcpu_align = __alignof__(struct kvm_vcpu);
kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
0, NULL);
if (!kvm_vcpu_cache) {
r = -ENOMEM;
goto out_free_3;
}
r = kvm_async_pf_init();
if (r)
goto out_free;
kvm_chardev_ops.owner = module;
kvm_vm_fops.owner = module;
kvm_vcpu_fops.owner = module;
r = misc_register(&kvm_dev);
if (r) {
printk(KERN_ERR "kvm: misc device register failed\n");
goto out_unreg;
}
register_syscore_ops(&kvm_syscore_ops);
kvm_preempt_ops.sched_in = kvm_sched_in;
kvm_preempt_ops.sched_out = kvm_sched_out;
r = kvm_init_debug();
if (r) {
printk(KERN_ERR "kvm: create debugfs files failed\n");
goto out_undebugfs;
}
return 0;
out_undebugfs:
unregister_syscore_ops(&kvm_syscore_ops);
misc_deregister(&kvm_dev);
out_unreg:
kvm_async_pf_deinit();
out_free:
kmem_cache_destroy(kvm_vcpu_cache);
out_free_3:
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
out_free_2:
out_free_1:
kvm_arch_hardware_unsetup();
out_free_0a:
free_cpumask_var(cpus_hardware_enabled);
out_free_0:
kvm_arch_exit();
out_fail:
return r;
}
EXPORT_SYMBOL_GPL(kvm_init);
void kvm_exit(void)
{
kvm_exit_debug();
misc_deregister(&kvm_dev);
kmem_cache_destroy(kvm_vcpu_cache);
kvm_async_pf_deinit();
unregister_syscore_ops(&kvm_syscore_ops);
unregister_reboot_notifier(&kvm_reboot_notifier);
unregister_cpu_notifier(&kvm_cpu_notifier);
on_each_cpu(hardware_disable_nolock, NULL, 1);
kvm_arch_hardware_unsetup();
kvm_arch_exit();
free_cpumask_var(cpus_hardware_enabled);
}
EXPORT_SYMBOL_GPL(kvm_exit);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5768_0 |
crossvul-cpp_data_good_5852_0 | /*
* Copyright (c) 2006 Oracle. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include "rds.h"
#include "ib.h"
static unsigned int fmr_pool_size = RDS_FMR_POOL_SIZE;
unsigned int fmr_message_size = RDS_FMR_SIZE + 1; /* +1 allows for unaligned MRs */
unsigned int rds_ib_retry_count = RDS_IB_DEFAULT_RETRY_COUNT;
module_param(fmr_pool_size, int, 0444);
MODULE_PARM_DESC(fmr_pool_size, " Max number of fmr per HCA");
module_param(fmr_message_size, int, 0444);
MODULE_PARM_DESC(fmr_message_size, " Max size of a RDMA transfer");
module_param(rds_ib_retry_count, int, 0444);
MODULE_PARM_DESC(rds_ib_retry_count, " Number of hw retries before reporting an error");
/*
* We have a clumsy combination of RCU and an rwsem protecting this list,
* because it is used both in the get_mr fast path and while blocking in
* the FMR flushing path.
*/
DECLARE_RWSEM(rds_ib_devices_lock);
struct list_head rds_ib_devices;
/* NOTE: if also grabbing ibdev lock, grab this first */
DEFINE_SPINLOCK(ib_nodev_conns_lock);
LIST_HEAD(ib_nodev_conns);
static void rds_ib_nodev_connect(void)
{
struct rds_ib_connection *ic;
spin_lock(&ib_nodev_conns_lock);
list_for_each_entry(ic, &ib_nodev_conns, ib_node)
rds_conn_connect_if_down(ic->conn);
spin_unlock(&ib_nodev_conns_lock);
}
static void rds_ib_dev_shutdown(struct rds_ib_device *rds_ibdev)
{
struct rds_ib_connection *ic;
unsigned long flags;
spin_lock_irqsave(&rds_ibdev->spinlock, flags);
list_for_each_entry(ic, &rds_ibdev->conn_list, ib_node)
rds_conn_drop(ic->conn);
spin_unlock_irqrestore(&rds_ibdev->spinlock, flags);
}
/*
* rds_ib_destroy_mr_pool() blocks on a few things, and MRs drop references
* from interrupt context, so we push the freeing off into a work struct
* run by krdsd.
*/
static void rds_ib_dev_free(struct work_struct *work)
{
struct rds_ib_ipaddr *i_ipaddr, *i_next;
struct rds_ib_device *rds_ibdev = container_of(work,
struct rds_ib_device, free_work);
if (rds_ibdev->mr_pool)
rds_ib_destroy_mr_pool(rds_ibdev->mr_pool);
if (rds_ibdev->mr)
ib_dereg_mr(rds_ibdev->mr);
if (rds_ibdev->pd)
ib_dealloc_pd(rds_ibdev->pd);
list_for_each_entry_safe(i_ipaddr, i_next, &rds_ibdev->ipaddr_list, list) {
list_del(&i_ipaddr->list);
kfree(i_ipaddr);
}
kfree(rds_ibdev);
}
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev)
{
BUG_ON(atomic_read(&rds_ibdev->refcount) <= 0);
if (atomic_dec_and_test(&rds_ibdev->refcount))
queue_work(rds_wq, &rds_ibdev->free_work);
}
static void rds_ib_add_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
struct ib_device_attr *dev_attr;
/* Only handle IB (no iWARP) devices */
if (device->node_type != RDMA_NODE_IB_CA)
return;
dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
if (!dev_attr)
return;
if (ib_query_device(device, dev_attr)) {
rdsdebug("Query device failed for %s\n", device->name);
goto free_attr;
}
rds_ibdev = kzalloc_node(sizeof(struct rds_ib_device), GFP_KERNEL,
ibdev_to_node(device));
if (!rds_ibdev)
goto free_attr;
spin_lock_init(&rds_ibdev->spinlock);
atomic_set(&rds_ibdev->refcount, 1);
INIT_WORK(&rds_ibdev->free_work, rds_ib_dev_free);
rds_ibdev->max_wrs = dev_attr->max_qp_wr;
rds_ibdev->max_sge = min(dev_attr->max_sge, RDS_IB_MAX_SGE);
rds_ibdev->fmr_max_remaps = dev_attr->max_map_per_fmr ?: 32;
rds_ibdev->max_fmrs = dev_attr->max_fmr ?
min_t(unsigned int, dev_attr->max_fmr, fmr_pool_size) :
fmr_pool_size;
rds_ibdev->max_initiator_depth = dev_attr->max_qp_init_rd_atom;
rds_ibdev->max_responder_resources = dev_attr->max_qp_rd_atom;
rds_ibdev->dev = device;
rds_ibdev->pd = ib_alloc_pd(device);
if (IS_ERR(rds_ibdev->pd)) {
rds_ibdev->pd = NULL;
goto put_dev;
}
rds_ibdev->mr = ib_get_dma_mr(rds_ibdev->pd, IB_ACCESS_LOCAL_WRITE);
if (IS_ERR(rds_ibdev->mr)) {
rds_ibdev->mr = NULL;
goto put_dev;
}
rds_ibdev->mr_pool = rds_ib_create_mr_pool(rds_ibdev);
if (IS_ERR(rds_ibdev->mr_pool)) {
rds_ibdev->mr_pool = NULL;
goto put_dev;
}
INIT_LIST_HEAD(&rds_ibdev->ipaddr_list);
INIT_LIST_HEAD(&rds_ibdev->conn_list);
down_write(&rds_ib_devices_lock);
list_add_tail_rcu(&rds_ibdev->list, &rds_ib_devices);
up_write(&rds_ib_devices_lock);
atomic_inc(&rds_ibdev->refcount);
ib_set_client_data(device, &rds_ib_client, rds_ibdev);
atomic_inc(&rds_ibdev->refcount);
rds_ib_nodev_connect();
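/*
* Drop the function-local reference; the devices list and the IB
* client_data each still hold one of the references taken above.
*/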
put_dev:
rds_ib_dev_put(rds_ibdev);
free_attr:
kfree(dev_attr);
}
/*
* New connections use this to find the device to associate with the
* connection. It's not in the fast path so we're not concerned about the
* performance of the IB call. (As of this writing, it uses an interrupt
* blocking spinlock to serialize walking a per-device list of all registered
* clients.)
*
* RCU is used to handle incoming connections racing with device teardown.
* Rather than use a lock to serialize removal from the client_data and
* getting a new reference, we use an RCU grace period. The destruction
* path removes the device from client_data and then waits for all RCU
* readers to finish.
*
* A new connection can get NULL from this if it is arriving on a
* device that is in the process of being removed.
*/
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
rcu_read_lock();
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (rds_ibdev)
atomic_inc(&rds_ibdev->refcount);
rcu_read_unlock();
return rds_ibdev;
}
/*
* The IB stack is letting us know that a device is going away. This can
* happen if the underlying HCA driver is removed or if PCI hotplug is removing
* the pci function, for example.
*
* This can be called at any time and can be racing with any other RDS path.
*/
static void rds_ib_remove_one(struct ib_device *device)
{
struct rds_ib_device *rds_ibdev;
rds_ibdev = ib_get_client_data(device, &rds_ib_client);
if (!rds_ibdev)
return;
rds_ib_dev_shutdown(rds_ibdev);
/* stop connection attempts from getting a reference to this device. */
ib_set_client_data(device, &rds_ib_client, NULL);
down_write(&rds_ib_devices_lock);
list_del_rcu(&rds_ibdev->list);
up_write(&rds_ib_devices_lock);
/*
* This synchronize_rcu() waits for readers of both the ib
* client data and the devices list to finish before we drop
* both of those references.
*/
synchronize_rcu();
rds_ib_dev_put(rds_ibdev);
rds_ib_dev_put(rds_ibdev);
}
struct ib_client rds_ib_client = {
.name = "rds_ib",
.add = rds_ib_add_one,
.remove = rds_ib_remove_one
};
static int rds_ib_conn_info_visitor(struct rds_connection *conn,
void *buffer)
{
struct rds_info_rdma_connection *iinfo = buffer;
struct rds_ib_connection *ic;
/* We will only ever look at IB transports */
if (conn->c_trans != &rds_ib_transport)
return 0;
iinfo->src_addr = conn->c_laddr;
iinfo->dst_addr = conn->c_faddr;
memset(&iinfo->src_gid, 0, sizeof(iinfo->src_gid));
memset(&iinfo->dst_gid, 0, sizeof(iinfo->dst_gid));
if (rds_conn_state(conn) == RDS_CONN_UP) {
struct rds_ib_device *rds_ibdev;
struct rdma_dev_addr *dev_addr;
ic = conn->c_transport_data;
dev_addr = &ic->i_cm_id->route.addr.dev_addr;
rdma_addr_get_sgid(dev_addr, (union ib_gid *) &iinfo->src_gid);
rdma_addr_get_dgid(dev_addr, (union ib_gid *) &iinfo->dst_gid);
rds_ibdev = ic->rds_ibdev;
iinfo->max_send_wr = ic->i_send_ring.w_nr;
iinfo->max_recv_wr = ic->i_recv_ring.w_nr;
iinfo->max_send_sge = rds_ibdev->max_sge;
rds_ib_get_mr_info(rds_ibdev, iinfo);
}
return 1;
}
static void rds_ib_ic_info(struct socket *sock, unsigned int len,
struct rds_info_iterator *iter,
struct rds_info_lengths *lens)
{
rds_for_each_conn_info(sock, len, iter, lens,
rds_ib_conn_info_visitor,
sizeof(struct rds_info_rdma_connection));
}
/*
* Early RDS/IB was built to only bind to an address if there is an IPoIB
* device with that address set.
*
* If it were me, I'd advocate for something more flexible. Sending and
* receiving should be device-agnostic. Transports would try and maintain
* connections between peers who have messages queued. Userspace would be
* allowed to influence which paths have priority. We could call userspace
* asserting this policy "routing".
*/
static int rds_ib_laddr_check(__be32 addr)
{
int ret;
struct rdma_cm_id *cm_id;
struct sockaddr_in sin;
/* Create a CMA ID and try to bind it. This catches both
* IB and iWARP capable NICs.
*/
cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
memset(&sin, 0, sizeof(sin));
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = addr;
/* rdma_bind_addr will only succeed for IB & iWARP devices */
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
if (ret || !cm_id->device ||
cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
&addr, ret,
cm_id->device ? cm_id->device->node_type : -1);
rdma_destroy_id(cm_id);
return ret;
}
static void rds_ib_unregister_client(void)
{
ib_unregister_client(&rds_ib_client);
/* wait for rds_ib_dev_free() to complete */
flush_workqueue(rds_wq);
}
void rds_ib_exit(void)
{
rds_info_deregister_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
rds_ib_unregister_client();
rds_ib_destroy_nodev_conns();
rds_ib_sysctl_exit();
rds_ib_recv_exit();
rds_trans_unregister(&rds_ib_transport);
}
struct rds_transport rds_ib_transport = {
.laddr_check = rds_ib_laddr_check,
.xmit_complete = rds_ib_xmit_complete,
.xmit = rds_ib_xmit,
.xmit_rdma = rds_ib_xmit_rdma,
.xmit_atomic = rds_ib_xmit_atomic,
.recv = rds_ib_recv,
.conn_alloc = rds_ib_conn_alloc,
.conn_free = rds_ib_conn_free,
.conn_connect = rds_ib_conn_connect,
.conn_shutdown = rds_ib_conn_shutdown,
.inc_copy_to_user = rds_ib_inc_copy_to_user,
.inc_free = rds_ib_inc_free,
.cm_initiate_connect = rds_ib_cm_initiate_connect,
.cm_handle_connect = rds_ib_cm_handle_connect,
.cm_connect_complete = rds_ib_cm_connect_complete,
.stats_info_copy = rds_ib_stats_info_copy,
.exit = rds_ib_exit,
.get_mr = rds_ib_get_mr,
.sync_mr = rds_ib_sync_mr,
.free_mr = rds_ib_free_mr,
.flush_mrs = rds_ib_flush_mrs,
.t_owner = THIS_MODULE,
.t_name = "infiniband",
.t_type = RDS_TRANS_IB
};
int rds_ib_init(void)
{
int ret;
INIT_LIST_HEAD(&rds_ib_devices);
ret = ib_register_client(&rds_ib_client);
if (ret)
goto out;
ret = rds_ib_sysctl_init();
if (ret)
goto out_ibreg;
ret = rds_ib_recv_init();
if (ret)
goto out_sysctl;
ret = rds_trans_register(&rds_ib_transport);
if (ret)
goto out_recv;
rds_info_register_func(RDS_INFO_IB_CONNECTIONS, rds_ib_ic_info);
goto out;
out_recv:
rds_ib_recv_exit();
out_sysctl:
rds_ib_sysctl_exit();
out_ibreg:
rds_ib_unregister_client();
out:
return ret;
}
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5852_0 |
crossvul-cpp_data_good_1677_1 | /*
* UDP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/udp.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/busy_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
const struct in6_addr *faddr,
const __be16 fport)
{
static u32 udp6_ehash_secret __read_mostly;
static u32 udp_ipv6_hash_secret __read_mostly;
u32 lhash, fhash;
net_get_random_once(&udp6_ehash_secret,
sizeof(udp6_ehash_secret));
net_get_random_once(&udp_ipv6_hash_secret,
sizeof(udp_ipv6_hash_secret));
lhash = (__force u32)laddr->s6_addr32[3];
fhash = __ipv6_addr_jhash(faddr, udp_ipv6_hash_secret);
return __inet6_ehashfn(lhash, lport, fhash, fport,
udp_ipv6_hash_secret + net_hash_mix(net));
}
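/*
* Decide whether two sockets' local receive addresses clash for bind()
* purposes: v4-mapped addresses are compared as IPv4, wildcard binds
* clash with everything they can see, and IPV6_V6ONLY keeps an IPv6
* socket from clashing with IPv4 addresses.
*/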
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
(!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr ||
sk->sk_rcv_saddr == sk2->sk_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
!(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
return 1;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
return 1;
return 0;
}
static u32 udp6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &sk->sk_v6_rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
&sk->sk_v6_rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
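/*
* Score how well a socket matches the incoming packet: every attribute
* the socket is bound to (destination port, local address, peer
* address, device) must match and adds one point; an unbound attribute
* acts as a wildcard and adds nothing.
*/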
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
return score;
}
#define SCORE2_MAX (1 + 1 + 1)
static inline int compute_score2(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr,
unsigned short hnum, int dif)
{
int score;
struct inet_sock *inet;
if (!net_eq(sock_net(sk), net) ||
udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6)
return -1;
if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
return -1;
score = 0;
inet = inet_sk(sk);
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
if (!ipv6_addr_equal(&sk->sk_v6_daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
return score;
}
/* called with rcu_read_lock() held */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
} else if (score == SCORE2_MAX)
goto exact_match;
} else if (score == badness && reuseport) {
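/*
* Tied SO_REUSEPORT sockets: pick one pseudo-randomly, keyed on the
* flow hash, so that incoming flows spread across the group.
*/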
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart the lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
exact_match:
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
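/*
* For long primary chains (more than 10 entries) try the secondary
* hash, keyed on both port and address: first with the exact local
* address, then with the wildcard address.
*/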
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = udp6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
if (reciprocal_scale(hash, matches) == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* if the nulls value we got at the end of this lookup is
* not the expected one, we must restart the lookup.
* We probably met an item that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, hnum, saddr, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
sk = skb_steal_sock(skb);
if (unlikely(sk))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
udptable);
}
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
* This should be easy: if there is something there we
* return it, otherwise we block.
*/
int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len, addr_len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_msg(skb, sizeof(struct udphdr),
msg, copied);
else {
err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
inet6_iif(skb));
}
*addr_len = sizeof(*sin6);
}
if (np->rxopt.all)
ip6_datagram_recv_common_ctl(sk, msg, skb);
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_specific_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
/* starting over for a new packet, but check if we need to yield */
cond_resched();
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr *)(skb->data+offset);
struct sock *sk;
int err;
struct net *net = dev_net(skb->dev);
sk = __udp6_lib_lookup(net, daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (!sk) {
ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
ICMP6_MIB_INERRORS);
return;
}
if (type == ICMPV6_PKT_TOOBIG) {
if (!ip6_sk_accept_pmtu(sk))
goto out;
ip6_sk_update_pmtu(skb, sk, info);
}
if (type == NDISC_REDIRECT) {
ip6_sk_redirect(skb, sk);
goto out;
}
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!ipv6_addr_any(&sk->sk_v6_daddr)) {
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
sk_incoming_cpu_update(sk);
}
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
return 0;
}
static __inline__ void udpv6_err(struct sk_buff *skb,
struct inet6_skb_parm *opt, u8 type,
u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
if (!static_key_enabled(&udpv6_encap_needed))
static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv) {
int ret;
/* Verify checksum before giving to encap */
if (udp_lib_checksum_complete(skb))
goto csum_error;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
net_dbg_ratelimited("UDPLITE6: partial coverage %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
net_dbg_ratelimited("UDPLITE6: coverage %d too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
}
if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
goto drop;
}
skb_dst_drop(skb);
bh_lock_sock(sk);
rc = 0;
if (!sock_owned_by_user(sk))
rc = __udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif, unsigned short hnum)
{
struct inet_sock *inet = inet_sk(sk);
if (!net_eq(sock_net(sk), net))
return false;
if (udp_sk(sk)->udp_port_hash != hnum ||
sk->sk_family != PF_INET6 ||
(inet->inet_dport && inet->inet_dport != rmt_port) ||
(!ipv6_addr_any(&sk->sk_v6_daddr) &&
!ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
(!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
return false;
if (!inet6_mc_check(sk, loc_addr, rmt_addr))
return false;
return true;
}
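/*
* Deliver one skb to a batch of multicast receivers: every socket but
* the one at index 'final' gets a clone; that last socket consumes the
* original skb.
*/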
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
struct sk_buff *skb1 = NULL;
struct sock *sk;
unsigned int i;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(!skb1))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
sock_put(sk);
}
if (unlikely(skb1))
kfree_skb(skb1);
}
static void udp6_csum_zero_error(struct sk_buff *skb)
{
/* RFC 2460 section 8.1 says that we SHOULD log
* this error. Well, it is reasonable.
*/
net_dbg_ratelimited("IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
&ipv6_hdr(skb)->saddr, ntohs(udp_hdr(skb)->source),
&ipv6_hdr(skb)->daddr, ntohs(udp_hdr(skb)->dest));
}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable, int proto)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(uh->dest);
struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
int dif = inet6_iif(skb);
unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
bool inner_flushed = false;
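/*
* Collect matching sockets into a fixed-size batch under the slot
* lock; whenever the batch fills up, flush it (delivering clones) and
* keep scanning.
*/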
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
udp_table.mask;
hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
start_lookup:
hslot = &udp_table.hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
spin_lock(&hslot->lock);
sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
if (__udp_v6_is_mcast_sock(net, sk,
uh->dest, daddr,
uh->source, saddr,
dif, hnum) &&
/* If zero checksum and no_check is not on for
* the socket then skip it.
*/
(uh->check || udp_sk(sk)->no_check6_rx)) {
if (unlikely(count == ARRAY_SIZE(stack))) {
flush_stack(stack, count, skb, ~0);
inner_flushed = true;
count = 0;
}
stack[count++] = sk;
sock_hold(sk);
}
}
spin_unlock(&hslot->lock);
/* Also lookup *:port if we are using hash2 and haven't done so yet. */
if (use_hash2 && hash2 != hash2_any) {
hash2 = hash2_any;
goto start_lookup;
}
if (count) {
flush_stack(stack, count, skb, count - 1);
} else {
if (!inner_flushed)
UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
proto == IPPROTO_UDPLITE);
consume_skb(skb);
}
return 0;
}
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable, proto);
/* Unicast */
/*
* Check the socket cache ... must talk to Alan about his plans
* for sock caches ... I'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk) {
int ret;
if (!uh->check && !udp_sk(sk)->no_check6_rx) {
sock_put(sk);
udp6_csum_zero_error(skb);
goto csum_error;
}
if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
ip6_compute_pseudo);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
/* A return value > 0 means the caller must resubmit the
* input; the convention is to return -protocol in that
* case, and 0 otherwise.
*/
if (ret > 0)
return -ret;
return 0;
}
if (!uh->check) {
udp6_csum_zero_error(skb);
goto csum_error;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
net_dbg_ratelimited("UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr, ntohs(uh->source),
ulen, skb->len,
daddr, ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
static __inline__ int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
static void udp_v6_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
up->pending = 0;
ip6_flush_pending_frames(sk);
}
}
/**
* udp6_hwcsum_outgoing - handle outgoing HW checksumming
* @sk: socket we are sending on
* @skb: sk_buff containing the filled-in UDP header
* (checksum field must be zeroed out)
* @saddr: source address
* @daddr: destination address
* @len: UDP length (header plus payload)
*/
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
__wsum csum = 0;
if (!frags) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
} else {
/*
* HW checksumming cannot be used: there are two or more
* fragments on the socket, so the checksums of all
* sk_buffs must be combined in software.
*/
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb->ip_summed = CHECKSUM_NONE;
do {
csum = csum_add(csum, frags->csum);
} while ((frags = frags->next));
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
/*
* Sending
*/
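/*
* udp_v6_send_skb() fills in the UDP header and picks one of four
* checksum strategies: UDP-Lite software csum, no checksum at all
* when no_check6_tx is set, hardware offload for CHECKSUM_PARTIAL,
* or a full software csum folded into the IPv6 pseudo-header.
*/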
static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6)
{
struct sock *sk = skb->sk;
struct udphdr *uh;
int err = 0;
int is_udplite = IS_UDPLITE(sk);
__wsum csum = 0;
int offset = skb_transport_offset(skb);
int len = skb->len - offset;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum(skb);
else if (udp_sk(sk)->no_check6_tx) { /* UDP csum disabled */
skb->ip_summed = CHECKSUM_NONE;
goto send;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr, len);
goto send;
} else
csum = udp_csum(skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_send_skb(skb);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
return err;
}
static int udp_v6_push_pending_frames(struct sock *sk)
{
struct sk_buff *skb;
struct udp_sock *up = udp_sk(sk);
struct flowi6 fl6;
int err = 0;
if (up->pending == AF_INET)
return udp_push_pending_frames(sk);
/* ip6_finish_skb will release the cork, so make a copy of
* fl6 here.
*/
fl6 = inet_sk(sk)->cork.fl.u.ip6;
skb = ip6_finish_skb(sk);
if (!skb)
goto out;
err = udp_v6_send_skb(skb, &fl6);
out:
up->len = 0;
up->pending = 0;
return err;
}
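/*
* udpv6_sendmsg() handles both the lockless fast path (a single
* uncorked datagram) and the corked path, where data is appended
* under the socket lock until the cork is released. v4-mapped
* destinations are handed off to udp_sendmsg().
*/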
int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &sk->sk_v6_daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(sk, msg, len);
/* Rough check for arithmetic overflow;
* a more precise check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl6, 0, sizeof(fl6));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl6.fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &sk->sk_v6_daddr))
daddr = &sk->sk_v6_daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
daddr = &sk->sk_v6_daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (!flowlabel)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (!opt)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = 0;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto out;
}
if (hlimit < 0)
hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst);
if (tclass < 0)
tclass = np->tclass;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
/* Lockless fast path for the non-corking case */
if (!corkreq) {
struct sk_buff *skb;
skb = ip6_make_skb(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt,
&fl6, (struct rt6_info *)dst,
msg->msg_flags, dontfrag);
err = PTR_ERR(skb);
if (!IS_ERR_OR_NULL(skb))
err = udp_v6_send_skb(skb, &fl6);
goto release_dst;
}
lock_sock(sk);
if (unlikely(up->pending)) {
/* The socket is already corked while preparing it. */
/* ... which is an evident application bug. --ANK */
release_sock(sk);
net_dbg_ratelimited("udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
if (dontfrag < 0)
dontfrag = np->dontfrag;
up->len += ulen;
err = ip6_append_data(sk, getfrag, msg, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info *)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
release_dst:
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr) ?
&sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
out:
dst_release(dst);
fl6_sock_release(flowlabel);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be ideal (it is not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards exists, but it covers
* too many things). We could add another stat, but for now that seems
* like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
inet6_destroy_sock(sk);
}
/*
* Socket option code for UDP
*/
int udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
int bucket = ((struct udp_iter_state *)seq->private)->bucket;
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
}
return 0;
}
static const struct file_operations udp6_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
.seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
};
int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
void udp6_proc_exit(struct net *net)
{
udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
void udp_v6_clear_sk(struct sock *sk, int size)
{
struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear pinet6 field, because of RCU lookups */
sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
memset(&inet->pinet6 + 1, 0, size);
}
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
.clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
.flags = INET_PROTOSW_PERMANENT,
};
int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
ret = inet6_register_protosw(&udpv6_protosw);
if (ret)
goto out_udpv6_protocol;
out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_1677_1 |
crossvul-cpp_data_good_3432_4 | /*
* runtime.c: Runtime functions
*
* Authors:
* Jonathan Pryor
*
* Copyright 2010 Novell, Inc (http://www.novell.com)
*/
#include <config.h>
#include <glib.h>
#include <mono/metadata/appdomain.h>
#include <mono/metadata/class.h>
#include <mono/metadata/class-internals.h>
#include <mono/metadata/runtime.h>
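/*
* Fire the managed AppDomain ProcessExit event for one domain: look
* up the ProcessExit delegate field on the domain object and invoke
* it if it has been set.
*/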
static void
fire_process_exit_event (MonoDomain *domain, gpointer user_data)
{
MonoClassField *field;
gpointer pa [2];
MonoObject *delegate, *exc;
field = mono_class_get_field_from_name (mono_defaults.appdomain_class, "ProcessExit");
g_assert (field);
delegate = *(MonoObject **)(((char *)domain->domain) + field->offset);
if (delegate == NULL)
return;
pa [0] = domain;
pa [1] = NULL;
mono_runtime_delegate_invoke (delegate, pa, &exc);
}
void
mono_runtime_shutdown (void)
{
mono_domain_foreach (fire_process_exit_event, NULL);
/* From this point on, no more dynamic methods (DMs) can be created. */
mono_reflection_shutdown ();
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3432_4 |
crossvul-cpp_data_good_5672_0 | #include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#endif
#include "br_private.h"
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_port *p;
struct nlattr *nest;
if (!br->multicast_router || hlist_empty(&br->router_list))
return 0;
nest = nla_nest_start(skb, MDBA_ROUTER);
if (nest == NULL)
return -EMSGSIZE;
hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
if (p && nla_put_u32(skb, MDBA_ROUTER_PORT, p->dev->ifindex))
goto fail;
}
nla_nest_end(skb, nest);
return 0;
fail:
nla_nest_cancel(skb, nest);
return -EMSGSIZE;
}
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
struct net_bridge_mdb_htable *mdb;
struct nlattr *nest, *nest2;
int i, err = 0;
int idx = 0, s_idx = cb->args[1];
if (br->multicast_disabled)
return 0;
mdb = rcu_dereference(br->mdb);
if (!mdb)
return 0;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
return -EMSGSIZE;
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p, **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
if (idx < s_idx)
goto skip;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL) {
err = -EMSGSIZE;
goto out;
}
for (pp = &mp->ports;
(p = rcu_dereference(*pp)) != NULL;
pp = &p->next) {
port = p->port;
if (port) {
struct br_mdb_entry e;
memset(&e, 0, sizeof(e));
e.ifindex = port->dev->ifindex;
e.state = p->state;
if (p->addr.proto == htons(ETH_P_IP))
e.addr.u.ip4 = p->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
if (p->addr.proto == htons(ETH_P_IPV6))
e.addr.u.ip6 = p->addr.u.ip6;
#endif
e.addr.proto = p->addr.proto;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(e), &e)) {
nla_nest_cancel(skb, nest2);
err = -EMSGSIZE;
goto out;
}
}
}
nla_nest_end(skb, nest2);
skip:
idx++;
}
}
out:
cb->args[1] = idx;
nla_nest_end(skb, nest);
return err;
}
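/*
* Netlink dump callback: walks every bridge device, emitting one
* RTM_GETMDB message per bridge and resuming from cb->args[] across
* partial dumps.
*/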
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net_device *dev;
struct net *net = sock_net(skb->sk);
struct nlmsghdr *nlh = NULL;
int idx = 0, s_idx;
s_idx = cb->args[0];
rcu_read_lock();
/* In theory this could wrap around to 0... */
cb->seq = net->dev_base_seq + br_mdb_rehash_seq;
for_each_netdev_rcu(net, dev) {
if (dev->priv_flags & IFF_EBRIDGE) {
struct br_port_msg *bpm;
if (idx < s_idx)
goto skip;
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, RTM_GETMDB,
sizeof(*bpm), NLM_F_MULTI);
if (nlh == NULL)
break;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->ifindex = dev->ifindex;
if (br_mdb_fill_info(skb, cb, dev) < 0)
goto out;
if (br_rports_fill_info(skb, cb, dev) < 0)
goto out;
cb->args[1] = 0;
nlmsg_end(skb, nlh);
skip:
idx++;
}
}
out:
if (nlh)
nlmsg_end(skb, nlh);
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
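/*
* Build a single mdb notification message: the entry is wrapped in
* nested MDBA_MDB and MDBA_MDB_ENTRY attributes following a
* br_port_msg header.
*/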
static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct net_device *dev,
struct br_mdb_entry *entry, u32 pid,
u32 seq, int type, unsigned int flags)
{
struct nlmsghdr *nlh;
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
bpm = nlmsg_data(nlh);
memset(bpm, 0, sizeof(*bpm));
bpm->family = AF_BRIDGE;
bpm->ifindex = dev->ifindex;
nest = nla_nest_start(skb, MDBA_MDB);
if (nest == NULL)
goto cancel;
nest2 = nla_nest_start(skb, MDBA_MDB_ENTRY);
if (nest2 == NULL)
goto end;
if (nla_put(skb, MDBA_MDB_ENTRY_INFO, sizeof(*entry), entry))
goto end;
nla_nest_end(skb, nest2);
nla_nest_end(skb, nest);
return nlmsg_end(skb, nlh);
end:
nla_nest_end(skb, nest);
cancel:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t rtnl_mdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct br_port_msg))
+ nla_total_size(sizeof(struct br_mdb_entry));
}
static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(rtnl_mdb_nlmsg_size(), GFP_ATOMIC);
if (!skb)
goto errout;
err = nlmsg_populate_mdb_fill(skb, dev, entry, 0, 0, type, NTF_SELF);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
struct br_ip *group, int type)
{
struct br_mdb_entry entry;
memset(&entry, 0, sizeof(entry));
entry.ifindex = port->dev->ifindex;
entry.addr.proto = group->proto;
entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
entry.addr.u.ip6 = group->u.ip6;
#endif
__br_mdb_notify(dev, &entry, type);
}
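/*
* Sanity-check a user-supplied entry: it must name a port and a
* non-local IPv4 or transient IPv6 multicast group, and request
* either a permanent or a temporary state.
*/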
static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
{
if (entry->ifindex == 0)
return false;
if (entry->addr.proto == htons(ETH_P_IP)) {
if (!ipv4_is_multicast(entry->addr.u.ip4))
return false;
if (ipv4_is_local_multicast(entry->addr.u.ip4))
return false;
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
return false;
#endif
} else
return false;
if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY)
return false;
return true;
}
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
struct net_device **pdev, struct br_mdb_entry **pentry)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
struct br_port_msg *bpm;
struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
struct net_device *dev;
int err;
err = nlmsg_parse(nlh, sizeof(*bpm), tb, MDBA_SET_ENTRY, NULL);
if (err < 0)
return err;
bpm = nlmsg_data(nlh);
if (bpm->ifindex == 0) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, bpm->ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: br_mdb_parse() with unknown ifindex\n");
return -ENODEV;
}
if (!(dev->priv_flags & IFF_EBRIDGE)) {
pr_info("PF_BRIDGE: br_mdb_parse() with non-bridge\n");
return -EOPNOTSUPP;
}
*pdev = dev;
if (!tb[MDBA_SET_ENTRY] ||
nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid attr\n");
return -EINVAL;
}
entry = nla_data(tb[MDBA_SET_ENTRY]);
if (!is_valid_mdb_entry(entry)) {
pr_info("PF_BRIDGE: br_mdb_parse() with invalid entry\n");
return -EINVAL;
}
*pentry = entry;
return 0;
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
struct br_ip *group, unsigned char state)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct net_bridge_mdb_htable *mdb;
int err;
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, group);
if (!mp) {
mp = br_multicast_new_group(br, port, group);
err = PTR_ERR(mp);
if (IS_ERR(mp))
return err;
}
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (p->port == port)
return -EEXIST;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
p = br_multicast_new_port_group(port, group, *pp, state);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
return 0;
}
static int __br_mdb_add(struct net *net, struct net_bridge *br,
struct br_mdb_entry *entry)
{
struct br_ip ip;
struct net_device *dev;
struct net_bridge_port *p;
int ret;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
dev = __dev_get_by_index(net, entry->ifindex);
if (!dev)
return -ENODEV;
p = br_port_get_rtnl(dev);
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
else
ip.u.ip6 = entry->addr.u.ip6;
#endif
spin_lock_bh(&br->multicast_lock);
ret = br_mdb_add_group(br, p, &ip, entry->state);
spin_unlock_bh(&br->multicast_lock);
return ret;
}
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct br_mdb_entry *entry;
struct net_device *dev;
struct net_bridge *br;
int err;
err = br_mdb_parse(skb, nlh, &dev, &entry);
if (err < 0)
return err;
br = netdev_priv(dev);
err = __br_mdb_add(net, br, entry);
if (!err)
__br_mdb_notify(dev, entry, RTM_NEWMDB);
return err;
}
static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
struct net_bridge_port_group __rcu **pp;
struct br_ip ip;
int err = -EINVAL;
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
if (timer_pending(&br->multicast_querier_timer))
return -EBUSY;
ip.proto = entry->addr.proto;
if (ip.proto == htons(ETH_P_IP))
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
else
ip.u.ip6 = entry->addr.u.ip6;
#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
mp = br_mdb_ip_get(mdb, &ip);
if (!mp)
goto unlock;
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
if (!p->port || p->port->dev->ifindex != entry->ifindex)
continue;
if (p->port->state == BR_STATE_DISABLED)
goto unlock;
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
err = 0;
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
mod_timer(&mp->timer, jiffies);
break;
}
unlock:
spin_unlock_bh(&br->multicast_lock);
return err;
}
static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net_device *dev;
struct br_mdb_entry *entry;
struct net_bridge *br;
int err;
err = br_mdb_parse(skb, nlh, &dev, &entry);
if (err < 0)
return err;
br = netdev_priv(dev);
err = __br_mdb_del(br, entry);
if (!err)
__br_mdb_notify(dev, entry, RTM_DELMDB);
return err;
}
void br_mdb_init(void)
{
rtnl_register(PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, NULL);
rtnl_register(PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, NULL);
}
void br_mdb_uninit(void)
{
rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5672_0 |
crossvul-cpp_data_good_5116_0 | /* packet-wbxml.c
*
* Routines for WAP Binary XML dissection
* Copyright 2003, 2004, Olivier Biot.
*
* Routines for WV-CSP 1.3 dissection
* Copyright 2007, Andrei Rubaniuk.
*
* Refer to the AUTHORS file or the AUTHORS section in the man page
* for contacting the author(s) of this file.
*
* Wireshark - Network traffic analyzer
* By Gerald Combs <gerald@wireshark.org>
* Copyright 1998 Gerald Combs
*
* WAP Binary XML decoding functionality provided by Olivier Biot.
* WV-CSP 1.2 updated to Release version and WV-CSP 1.3 protocol
* decoding functionality provided by Andrei Rubaniuk.
*
* The WAP specifications used to be found at the WAP Forum:
* <http://www.wapforum.org/what/Technical.htm>
* But now the correct link is at the Open Mobile Alliance:
* <http://www.openmobilealliance.org/tech/affiliates/wap/wapindex.html>
* Media types defined by OMA affiliates will have their standards at:
* <http://www.openmobilealliance.org/tech/affiliates/index.html>
* <http://www.openmobilealliance.org/release_program/index.html>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Edit this file with 4-space tabulation */
#include "config.h"
#include <string.h>
#include <glib.h>
#include <epan/packet.h>
#include <epan/exceptions.h>
#include <epan/prefs.h>
#include <epan/wmem/wmem.h>
/* We need the function tvb_get_guintvar() */
#include "packet-wap.h"
void proto_register_wbxml(void);
void proto_reg_handoff_wbxml(void);
/* General-purpose debug logger.
* Requires double parentheses because of variable arguments of printf().
*
* Enable debug logging for WBXML by defining AM_FLAGS
* so that it contains "-DDEBUG_wbxml"
*/
#ifdef DEBUG_wbxml
#define DebugLog(x) \
g_print("%s:%u: ", __FILE__, __LINE__); \
g_print x
#else
#define DebugLog(x) ;
#endif
/* The code in this source file dissects the WAP Binary XML content,
* and if possible renders it. WBXML mappings are defined in the
* "wbxml_decoding" structure.
*
* NOTES:
*
* - Some WBXML content is *not* backwards compatible across minor versions.
* This painful remark is true for:
* o WMLC 1.0 with respect to later WMLC 1.x
* o All WV-CSP versions (never backwards compatible)
* The only way of correctly rendering the WBXML is to let the end-user
* choose from the possible renderings. This only applies to the case when
* the WBXML DocType is not included in the WBXML header (unknown/missing).
*
* - Some WBXML content uses EXT_T_* in a non-tableref manner. This is the
* case with WV-CSP 1.1 and up, where the index points to a value_string
* containing WV-CSP specific token values. This is allowed as it is not
* explicitly forbidden in the WBXML specifications. Hence the global token
* map for content must also contain a function pointer if no tableref
* string is used.
*
* - Code page switches apply until a new code page switch. In the WBXML/1.x
* ABNF notation, it can be proven that the switch_page can only precede
* the following tokens:
* o stag : TAG | LITERAL | LITERAL_A | LITERAL_C | LITERAL_AC
* o attr : ATTRSTART | ATTRVALUE
* o extension : EXT_I | EXT_T | EXT
* Code page switches are displayed in a separate column.
*
* - The WBXML spec states that code pages are static to both the tag and the
* attribute state parser. A SWITCH_PAGE within a state switches the code
* page of the active state only. Note that code page 255 is reserved for
* application-specific (read: testing) purposes.
*
* - In order to render the XML content, recursion is inevitable at some
* point (when a tag with content occurs in the content of a tag with
* content). The code will however not recurse if this is not strictly
* required (e.g., tag without content in the content of a tag with
* content).
*
* - I found it useful to display the XML nesting level as a first "column",
* followed by the abbreviated WBXML token interpretation. When a mapping
* is defined for the parsed WBXML content, then the XML rendering is
* displayed with appropriate indentation (maximum nesting level = 255,
* after which the nesting and level will safely roll-over to 0).
*
* - The WAP Forum defines the order of precedence for finding out the
* WBXML content type (same rules for charset) as follows:
* 1. Look in the Content-Type WSP header
* 2. Look in the WBXML header
* Currently there is no means of using content type parameters:
* o Type=<some_type>
* o Charset=<charset_of_the_content>
* So it is possible some WBXML content types are incorrectly parsed.
* This would only be the case when the content type declaration in the
* WSP Content-Type header would be different (or would have parameters
* which are relevant to the WBXML decoding) from the content type
* identifier specified in the WBXML header. This has to do with the
* decoding of terminated text strings in the different character codings.
* TODO: investigate this and provide correct decoding at all times.
*/
typedef struct _value_valuestring {
guint32 value;
const value_string *valstrptr;
} value_valuestring;
/* Tries to match val against each element in the value_valuestring array vvs.
* Returns the associated value_string ptr on a match, or NULL on failure. */
static const value_string *
val_to_valstr(guint32 val, const value_valuestring *vvs)
{
gint i = 0;
while (vvs[i].valstrptr) {
if (vvs[i].value == val)
return(vvs[i].valstrptr);
i++;
}
return(NULL);
}
/* Note on Token mapping
* ---------------------
*
* The WBXML dissector will try mapping the token decoding to their textual
* representation if the media type has a defined token representation. The
* following logic applies:
*
* a. Inspect the WBXML PublicID
* This means that I need a list { PublicID, decoding }
*
* b. Inspect the literal media type
* This requires a list { "media/type", discriminator, { decodings } }
*
* b.1. Use a discriminator to choose an appropriate token mapping;
* The discriminator needs a small number of bytes from the data tvbuff_t.
*
* else
* b.2. Provide a list to the end-user with all possible token mappings.
*
* c. If none match then only show the tokens without mapping.
*
*/
/* ext_t_func_ptr is a pointer to a function handling the EXT_T_i tokens:
*
* char * ext_t_function(tvbuff_t *tvb, guint32 value, guint32 strtbl);
*/
typedef char * (* ext_t_func_ptr)(tvbuff_t *, guint32, guint32);
/* Note on parsing of OPAQUE data
* ------------------------------
*
* The WBXML encapsulation allows the insertion of opaque binary data in the
* WBXML body. Although this opaque data has no meaning in WBXML, the media
* type itself may define a compact encoding of a given input by encoding
* it in such an OPAQUE blob of bytes.
*
* The WBXML dissector now supports dissection of OPAQUE data by means of a
* mapping function that will operate based on the token (well-known or literal)
* and the active code page.
*
* For well-known tokens the simplest approach is to use a switch for the code
* pages and another switch for the relevant tokens within a code page.
*
* For literal tokens (tags and attribute names), the only approach is a string
* comparison with the literal representation of the given tag or attribute
* name.
*
* opaque_token_func_ptr is a pointer to a function handling OPAQUE values
* for binary tokens representing tags or attribute starts.
* opaque_literal_func_ptr is a pointer to a function handling OPAQUE values
* for literal tokens representing tags or attribute starts.
*
* The length field of the OPAQUE entry starts at offset (not offset + 1).
*
* The length of the processed OPAQUE value is returned by reference.
*
* char * opaque_token_function(tvbuff_t *tvb, guint32 offset,
* guint8 token, guint8 codepage, guint32 *length);
* char * opaque_literal_function(tvbuff_t *tvb, guint32 offset,
* const char *token, guint8 codepage, guint32 *length);
*/
typedef char * (* opaque_token_func_ptr)(tvbuff_t *, guint32, guint8, guint8, guint32 *);
typedef char * (* opaque_literal_func_ptr)(tvbuff_t *, guint32, const char *, guint8, guint32 *);
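/* The default handlers below do not interpret the opaque bytes; they
* merely report how many bytes of opaque data were skipped. */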
static char *
default_opaque_binary_tag(tvbuff_t *tvb, guint32 offset,
guint8 token _U_, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of opaque data)", data_len);
*length += data_len;
return str;
}
static char *
default_opaque_literal_tag(tvbuff_t *tvb, guint32 offset,
const char *token _U_, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of opaque data)", data_len);
*length += data_len;
return str;
}
static char *
default_opaque_binary_attr(tvbuff_t *tvb, guint32 offset,
guint8 token _U_, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of opaque data)", data_len);
*length += data_len;
return str;
}
static char *
default_opaque_literal_attr(tvbuff_t *tvb, guint32 offset,
const char *token _U_, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of opaque data)", data_len);
*length += data_len;
return str;
}
/* Render a hex %dateTime encoded timestamp as a string.
* 0x20011231123456 becomes "2001-12-31T12:34:56Z" */
static char *
date_time_from_opaque(tvbuff_t *tvb, guint32 offset, guint32 data_len)
{
char *str;
switch (data_len) {
case 4: /* YYYY-MM-DD[T00:00:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT00:00:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3));
break;
case 5: /* YYYY-MM-DDThh[:00:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:00:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4));
break;
case 6: /* YYYY-MM-DDThh:mm[:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:%02x:00Z",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4),
tvb_get_guint8(tvb, offset + 5));
break;
case 7: /* YYYY-MM-DDThh:mm[:00Z] */
str = wmem_strdup_printf(wmem_packet_scope(), "%%DateTime: "
"%02x%02x-%02x-%02xT%02x:%02x:%02xZ",
tvb_get_guint8(tvb, offset),
tvb_get_guint8(tvb, offset + 1),
tvb_get_guint8(tvb, offset + 2),
tvb_get_guint8(tvb, offset + 3),
tvb_get_guint8(tvb, offset + 4),
tvb_get_guint8(tvb, offset + 5),
tvb_get_guint8(tvb, offset + 6));
break;
default:
str = wmem_strdup_printf(wmem_packet_scope(), "<Error: invalid binary %%DateTime "
"(%d bytes of opaque data)>", data_len);
break;
}
return str;
}
/* Is ALWAYS 6 bytes long:
* 00YY YYYY YYYY YYMM MMDD DDDh hhhh mmmm mmss ssss ZZZZ ZZZZ */
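/* Worked example (assumed input): the six octets 1F 5F 3F 7E FB 5A
* unpack to year 2007, month 12, day 31, 23:59:59, zone 'Z' (0x5A),
* rendering as "WV-CSP DateTime: 2007-12-31T23:59:59Z". */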
static char *
wv_datetime_from_opaque(tvbuff_t *tvb, guint32 offset, guint32 data_len)
{
char *str;
guint16 year;
guint8 month, day, hour, minute, second, time_zone;
guint8 peek;
if (data_len == 6) { /* Valid */
/* Octet 1: 00YY YYYY */
year = tvb_get_guint8(tvb, offset) & 0x3F; /* ..11 1111 */
year <<= 6;
/* Octet 2: YYYY YYMM */
peek = tvb_get_guint8(tvb, offset + 1);
year += (peek >> 2); /* 1111 11.. */
month = (peek & 0x03) << 2; /* .... ..11 */
/* Octet 3: MMDD DDDh */
peek = tvb_get_guint8(tvb, offset + 2);
month += (peek >> 6); /* 11.. .... */
day = (peek & 0x3E) >> 1; /* ..11 111. */
hour = (peek & 0x01) << 4; /* .... ...1 */
/* Octet 4: hhhh mmmm */
peek = tvb_get_guint8(tvb, offset + 3);
hour += (peek >> 4);
minute = (peek & 0x0F) << 2; /* .... 1111 */
/* Octet 5: mmss ssss */
peek = tvb_get_guint8(tvb, offset + 4);
minute += (peek >> 6); /* 11.. .... */
second = peek & 0x3F; /* ..11 1111 */
/* octet 6: ZZZZZZZZ */
time_zone = tvb_get_guint8(tvb, offset + 5);
/* Now construct the string */
str = wmem_strdup_printf(wmem_packet_scope(), "WV-CSP DateTime: "
"%04d-%02d-%02dT%02d:%02d:%02d%c",
year, month, day, hour, minute, second, time_zone);
} else { /* Invalid length for a WV-CSP DateTime tag value */
str = wmem_strdup_printf(wmem_packet_scope(), "<Error: invalid binary WV-CSP DateTime value "
"(%d bytes of opaque data)>", data_len);
}
return str;
}
/* WV-CSP integer values for tag content are encoded in a fashion similar
* to a Long-Integer in WSP */
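/* Worked example (assumed input): a 2-byte opaque value 01 2C is read
* with tvb_get_ntohs() as 0x012C and rendered as "WV-CSP Integer: 300". */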
static char *
wv_integer_from_opaque(tvbuff_t *tvb, guint32 offset, guint32 data_len)
{
char *str;
switch (data_len) {
case 1:
str = wmem_strdup_printf(wmem_packet_scope(), "WV-CSP Integer: %d",
tvb_get_guint8(tvb, offset));
break;
case 2:
str = wmem_strdup_printf(wmem_packet_scope(), "WV-CSP Integer: %d",
tvb_get_ntohs(tvb, offset));
break;
case 3:
str = wmem_strdup_printf(wmem_packet_scope(), "WV-CSP Integer: %d",
tvb_get_ntoh24(tvb, offset));
break;
case 4:
str = wmem_strdup_printf(wmem_packet_scope(), "WV-CSP Integer: %d",
tvb_get_ntohl(tvb, offset));
break;
default:
str = wmem_strdup_printf(wmem_packet_scope(), "<Error: invalid binary WV-CSP Integer value "
"(%d bytes of opaque data)>", data_len);
break;
}
return str;
}
static char *
wv_csp10_opaque_binary_tag(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage) {
case 0: /* Common code page */
switch (token) {
case 0x0B: /* <Code> */
case 0x0F: /* <ContentSize> */
case 0x1A: /* <MessageCount> */
case 0x3C: /* <Validity> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
case 0x11: /* <DateTime> */
str = wv_datetime_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 1: /* Access code page */
switch (token) {
case 0x1C: /* <KeepAliveTime> */
case 0x32: /* <TimeToLive> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 3: /* Client capability code page */
switch (token) {
case 0x06: /* <AcceptedContentLength> */
case 0x0C: /* <MultiTrans> */
case 0x0D: /* <ParserSize> */
case 0x0E: /* <ServerPollMin> */
case 0x11: /* <TCPAddress> */
case 0x12: /* <TCPPort> */
case 0x13: /* <UDPPort> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp10_opaque_literal_tag(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && ( (strcmp(token, "Code") == 0)
|| (strcmp(token, "ContentSize") == 0)
|| (strcmp(token, "MessageCount") == 0)
|| (strcmp(token, "Validity") == 0)
|| (strcmp(token, "KeepAliveTime") == 0)
|| (strcmp(token, "TimeToLive") == 0)
|| (strcmp(token, "AcceptedContentLength") == 0)
|| (strcmp(token, "MultiTrans") == 0)
|| (strcmp(token, "ParserSize") == 0)
|| (strcmp(token, "ServerPollMin") == 0)
|| (strcmp(token, "TCPAddress") == 0)
|| (strcmp(token, "TCPPort") == 0)
|| (strcmp(token, "UDPPort") == 0) ) )
{
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
}
else if ( token && ( strcmp(token, "DateTime") == 0) )
{
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp11_opaque_binary_tag(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage) {
case 0: /* Common code page */
switch (token) {
case 0x0B: /* <Code> */
case 0x0F: /* <ContentSize> */
case 0x1A: /* <MessageCount> */
case 0x3C: /* <Validity> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
case 0x11: /* <DateTime> */
str = wv_datetime_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 1: /* Access code page */
switch (token) {
case 0x1C: /* <KeepAliveTime> */
case 0x32: /* <TimeToLive> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 3: /* Client capability code page */
switch (token) {
case 0x06: /* <AcceptedContentLength> */
case 0x0C: /* <MultiTrans> */
case 0x0D: /* <ParserSize> */
case 0x0E: /* <ServerPollMin> */
case 0x12: /* <TCPPort> */
case 0x13: /* <UDPPort> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 6: /* Messaging code page */
switch (token) {
case 0x1A: /* <DeliveryTime> - not in 1.0 */
str = wv_datetime_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp11_opaque_literal_tag(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && ( (strcmp(token, "Code") == 0)
|| (strcmp(token, "ContentSize") == 0)
|| (strcmp(token, "MessageCount") == 0)
|| (strcmp(token, "Validity") == 0)
|| (strcmp(token, "KeepAliveTime") == 0)
|| (strcmp(token, "TimeToLive") == 0)
|| (strcmp(token, "AcceptedContentLength") == 0)
|| (strcmp(token, "MultiTrans") == 0)
|| (strcmp(token, "ParserSize") == 0)
|| (strcmp(token, "ServerPollMin") == 0)
|| (strcmp(token, "TCPPort") == 0)
|| (strcmp(token, "UDPPort") == 0) ) )
{
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
}
else
if ( token && ( (strcmp(token, "DateTime") == 0)
|| (strcmp(token, "DeliveryTime") == 0) ) )
{
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp12_opaque_binary_tag(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage) {
case 0: /* Common code page */
switch (token) {
case 0x0B: /* <Code> */
case 0x0F: /* <ContentSize> */
case 0x1A: /* <MessageCount> */
case 0x3C: /* <Validity> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
case 0x11: /* <DateTime> */
str = wv_datetime_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 1: /* Access code page */
switch (token) {
case 0x1C: /* <KeepAliveTime> */
case 0x32: /* <TimeToLive> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 3: /* Client capability code page */
switch (token) {
case 0x06: /* <AcceptedContentLength> */
case 0x0C: /* <MultiTrans> */
case 0x0D: /* <ParserSize> */
case 0x0E: /* <ServerPollMin> */
case 0x12: /* <TCPPort> */
case 0x13: /* <UDPPort> */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 6: /* Messaging code page */
switch (token) {
case 0x1A: /* <DeliveryTime> - not in 1.0 */
str = wv_datetime_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
case 9: /* Common code page (continued) */
switch (token) {
case 0x08: /* <HistoryPeriod> - 1.2 only */
case 0x0A: /* <MaxWatcherList> - 1.2 only */
str = wv_integer_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp12_opaque_literal_tag(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && ( (strcmp(token, "Code") == 0)
|| (strcmp(token, "ContentSize") == 0)
|| (strcmp(token, "MessageCount") == 0)
|| (strcmp(token, "Validity") == 0)
|| (strcmp(token, "KeepAliveTime") == 0)
|| (strcmp(token, "TimeToLive") == 0)
|| (strcmp(token, "AcceptedContentLength") == 0)
|| (strcmp(token, "MultiTrans") == 0)
|| (strcmp(token, "ParserSize") == 0)
|| (strcmp(token, "ServerPollMin") == 0)
|| (strcmp(token, "TCPPort") == 0)
|| (strcmp(token, "UDPPort") == 0)
|| (strcmp(token, "HistoryPeriod") == 0)
|| (strcmp(token, "MaxWatcherList") == 0) ) )
{
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
}
else
if ( token && ( (strcmp(token, "DateTime") == 0)
|| (strcmp(token, "DeliveryTime") == 0) ) )
{
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp13_opaque_binary_tag(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage)
{
case 0: /* Common code page */
switch (token)
{
case 0x0B: /* <Code> */
case 0x0F: /* <ContentSize> */
case 0x1A: /* <MessageCount> */
case 0x3C: /* <Validity> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
case 0x11: /* <DateTime> */
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 1: /* Access code page */
switch (token)
{
case 0x1C: /* <KeepAliveTime> */
case 0x25: /* <SearchFindings> */
case 0x26: /* <SearchID> */
case 0x27: /* <SearchIndex> */
case 0x28: /* <SearchLimit> */
case 0x32: /* <TimeToLive> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 3: /* Client capability code page */
switch (token)
{
case 0x06: /* <AcceptedContentLength> */
case 0x0C: /* <MultiTrans> */
case 0x0D: /* <ParserSize> */
case 0x0E: /* <ServerPollMin> */
case 0x12: /* <TCPPort> */
case 0x13: /* <UDPPort> */
/* New in WV-CSP 1.3*/
case 0x16: /* <AcceptedPullLength> */
case 0x17: /* <AcceptedPushLength> */
case 0x18: /* <AcceptedRichContentLength> */
case 0x19: /* <AcceptedTextContentLength> */
case 0x1B: /* <PlainTextCharset> MIBenum number - character set, i.e. UTF-8, windows-1251, etc. */
case 0x1C: /* <SessionPriority> */
case 0x1F: /* <UserSessionLimit> */
case 0x21: /* <MultiTransPerMessage> */
case 0x24: /* <ContentPolicyLimit> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 5: /* Presence attribute code page */
switch (token)
{
/* New in WV-CSP 1.3*/
/* case 0x3B: */ /* <ClientContentLimit> */
case 0x3C: /* <ClientIMPriority> */
case 0x3D: /* <MaxPullLength> */
case 0x3E: /* <MaxPushLength> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 6: /* Messaging code page */
switch (token)
{
case 0x1A: /* <DeliveryTime> - not in 1.0 */
/* New in WV-CSP 1.3*/
case 0x1C: /* <AnswerOptionID> */
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 9: /* Common code page (continued) */
switch (token)
{
case 0x08: /* <HistoryPeriod> - 1.2 only */
case 0x0A: /* <MaxWatcherList> - 1.2 only */
/* New in WV-CSP 1.3*/
case 0x25: /* <SegmentCount> */
case 0x28: /* <SegmentReference> */
case 0x30: /* <TryAgainTimeout> */
case 0x3A: /* <GroupContentLimit> */
case 0x3B: /* <MessageTotalCount> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
case 10:
switch (token)
{
/* New in WV-CSP 1.3*/
case 0x0C: /* <PairID> */
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL)
{ /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
wv_csp13_opaque_literal_tag(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && ( (strcmp(token, "Code") == 0)
|| (strcmp(token, "ContentSize") == 0)
|| (strcmp(token, "MessageCount") == 0)
|| (strcmp(token, "Validity") == 0)
|| (strcmp(token, "KeepAliveTime") == 0)
|| (strcmp(token, "TimeToLive") == 0)
|| (strcmp(token, "AcceptedContentLength") == 0)
|| (strcmp(token, "MultiTrans") == 0)
|| (strcmp(token, "ParserSize") == 0)
|| (strcmp(token, "ServerPollMin") == 0)
|| (strcmp(token, "TCPPort") == 0)
|| (strcmp(token, "UDPPort") == 0)
|| (strcmp(token, "HistoryPeriod") == 0)
|| (strcmp(token, "MaxWatcherList") == 0)
/* New in WV-CSP 1.3*/
|| (strcmp(token, "SearchFindings") == 0)
|| (strcmp(token, "SearchID") == 0)
|| (strcmp(token, "SearchIndex") == 0)
|| (strcmp(token, "SearchLimit") == 0)
|| (strcmp(token, "AcceptedPullLength") == 0)
|| (strcmp(token, "AcceptedPushLength") == 0)
|| (strcmp(token, "AcceptedRichContentLength") == 0)
|| (strcmp(token, "AcceptedTextContentLength") == 0)
|| (strcmp(token, "SessionPriority") == 0)
|| (strcmp(token, "UserSessionLimit") == 0)
|| (strcmp(token, "MultiTransPerMessage") == 0)
|| (strcmp(token, "ContentPolicyLimit") == 0)
|| (strcmp(token, "AnswerOptionID") == 0)
|| (strcmp(token, "SegmentCount") == 0)
|| (strcmp(token, "SegmentReference") == 0)
|| (strcmp(token, "TryAgainTimeout") == 0)
|| (strcmp(token, "GroupContentLimit") == 0)
|| (strcmp(token, "MessageTotalCount") == 0)
|| (strcmp(token, "PairID") == 0) ) )
{
str = wv_integer_from_opaque(tvb, offset + *length, data_len);
}
else
if ( token && ( (strcmp(token, "DateTime") == 0)
|| (strcmp(token, "DeliveryTime") == 0) ) )
{
str = wv_datetime_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
sic10_opaque_literal_attr(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && ( (strcmp(token, "created") == 0)
|| (strcmp(token, "si-expires") == 0) ) )
{
str = date_time_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
sic10_opaque_binary_attr(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage) {
case 0: /* Only valid codepage for SI */
switch (token) {
case 0x0A: /* created= */
case 0x10: /* si-expires= */
str = date_time_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
emnc10_opaque_literal_attr(tvbuff_t *tvb, guint32 offset,
const char *token, guint8 codepage _U_, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
if ( token && (strcmp(token, "timestamp") == 0) )
{
str = date_time_from_opaque(tvb, offset + *length, data_len);
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
static char *
emnc10_opaque_binary_attr(tvbuff_t *tvb, guint32 offset,
guint8 token, guint8 codepage, guint32 *length)
{
guint32 data_len = tvb_get_guintvar(tvb, offset, length);
char *str = NULL;
switch (codepage) {
case 0: /* Only valid codepage for EMN */
switch (token) {
case 0x05: /* timestamp= */
str = date_time_from_opaque(tvb,
offset + *length, data_len);
break;
default:
break;
}
break;
default:
break;
}
if (str == NULL) { /* Error, or not parsed */
str = wmem_strdup_printf(wmem_packet_scope(), "(%d bytes of unparsed opaque data)", data_len);
}
*length += data_len;
return str;
}
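/* Sketch of how the opaque handlers above are intended to be driven; the
 * real call sites live in the content parser elsewhere in this file, and
 * the variable names here are purely illustrative:
 *
 *   guint32 len = 0;
 *   char *s = map->opaque_binary_attr(tvb, off, attr_token, codepage, &len);
 *   off += len;   (len covers the uintvar length header plus the data)
 */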
typedef struct _wbxml_decoding {
const char *name; /* Descriptive name of the content decoding */
const char *abbrev; /* Short name used in display */
ext_t_func_ptr ext_t[3]; /* Handlers for the EXT_T_0 .. EXT_T_2 tokens */
opaque_token_func_ptr opaque_binary_tag; /* OPAQUE after a well-known tag */
opaque_literal_func_ptr opaque_literal_tag; /* OPAQUE after a literal tag */
opaque_token_func_ptr opaque_binary_attr; /* OPAQUE after a well-known attribute */
opaque_literal_func_ptr opaque_literal_attr; /* OPAQUE after a literal attribute */
const value_valuestring *global; /* Global extension tokens, per code page */
const value_valuestring *tags; /* Tag tokens, per code page */
const value_valuestring *attrStart; /* Attribute Start tokens, per code page */
const value_valuestring *attrValue; /* Attribute Value tokens, per code page */
} wbxml_decoding;
/* Define a pointer to a discriminator function taking a tvb and the start
* offset of the WBXML tokens in the body as arguments.
*/
typedef const wbxml_decoding * (* discriminator_func_ptr)(tvbuff_t *, guint32);
/* For the decoding lists based on the known WBXML public ID */
typedef struct _wbxml_integer_list {
guint32 public_id;
const wbxml_decoding *map;
} wbxml_integer_list;
/* For the decoding lists on the literal content type */
typedef struct _wbxml_literal_list {
const char *content_type;
discriminator_func_ptr discriminator; /* TODO */
const wbxml_decoding *map;
} wbxml_literal_list;
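/* Illustrative only: a lookup of this shape resolves a known public ID to
 * its token map; the NULL map entry terminates the list. This helper is an
 * assumption added for illustration, not part of the dissector proper. */
static const wbxml_decoding *
example_map_by_public_id(const wbxml_integer_list *list, guint32 public_id)
{
    for ( ; list->map; list++) {
        if (list->public_id == public_id)
            return list->map;
    }
    return NULL; /* unknown public ID: fall back to the literal lists */
}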
/************************** Variable declarations **************************/
/* Initialize the protocol and registered fields */
static int proto_wbxml = -1;
static int hf_wbxml_version = -1;
static int hf_wbxml_public_id_known = -1;
static int hf_wbxml_public_id_literal = -1;
static int hf_wbxml_charset = -1;
/* Initialize the subtree pointers */
static gint ett_wbxml = -1;
static gint ett_wbxml_str_tbl = -1;
static gint ett_wbxml_content = -1;
/* WBXML Preferences */
static gboolean skip_wbxml_token_mapping = FALSE;
static gboolean disable_wbxml_token_parsing = FALSE;
/**************** WBXML related declarations and definitions ****************/
/* WBXML public ID mappings. For an up-to-date list, see
* http://www.openmobilealliance.org/tech/omna/ */
static const value_string vals_wbxml_public_ids[] = {
/* 0x00 = literal public identifier */
{ 0x01, "Unknown or missing Public Identifier" },
{ 0x02, "-//WAPFORUM//DTD WML 1.0//EN (WML 1.0)" },
{ 0x03, "-//WAPFORUM//DTD WTA 1.0//EN (WTA Event 1.0) - Deprecated" },
{ 0x04, "-//WAPFORUM//DTD WML 1.1//EN (WML 1.1)" },
{ 0x05, "-//WAPFORUM//DTD SI 1.0//EN (Service Indication 1.0)" },
{ 0x06, "-//WAPFORUM//DTD SL 1.0//EN (Service Loading 1.0)" },
{ 0x07, "-//WAPFORUM//DTD CO 1.0//EN (Cache Operation 1.0)" },
{ 0x08, "-//WAPFORUM//DTD CHANNEL 1.1//EN (Channel 1.1)" },
{ 0x09, "-//WAPFORUM//DTD WML 1.2//EN (WML 1.2)" },
{ 0x0a, "-//WAPFORUM//DTD WML 1.3//EN (WML 1.3)" },
{ 0x0b, "-//WAPFORUM//DTD PROV 1.0//EN (Provisioning 1.0)" },
{ 0x0c, "-//WAPFORUM//DTD WTA-WML 1.2//EN (WTA-WML 1.2)" },
{ 0x0d, "-//WAPFORUM//DTD EMN 1.0//EN (Email Notification 1.0)" },
{ 0x0e, "-//WAPFORUM//DTD DRMREL 1.0//EN (DRMREL 1.0)" },
{ 0x0f, "-//WIRELESSVILLAGE//DTD CSP 1.0//EN"
" (Wireless Village Client-Server Protocol DTD v1.0)" },
{ 0x10, "-//WIRELESSVILLAGE//DTD CSP 1.1//EN"
" (Wireless Village Client-Server Protocol DTD v1.1)" },
{ 0x11, "-//OMA//DTD WV-CSP 1.2//EN (OMA IMPS - CSP protocol DTD v1.2)" },
{ 0x12, "-//OMA//DTD IMPS-CSP 1.3//EN (OMA IMPS - CSP protocol DTD v1.3)" },
{ 0x13, "-//OMA//DRM 2.1//EN (OMA DRM 2.1)" },
/* 0x14 -- 0x7F: reserved */
/* Registered values - www.syncml.org */
{ 0x0fd1, "-//SYNCML//DTD SyncML 1.0//EN (SyncML 1.0)" },
{ 0x0fd3, "-//SYNCML//DTD SyncML 1.1//EN (SyncML 1.1)" },
/* Registered values - www.wapforum.org/wina/ */
{ 0x1100, "-//PHONE.COM//DTD ALERT 1.0//EN" },
{ 0x1101, "-//PHONE.COM//DTD CACHE-OPERATION 1.0//EN" },
{ 0x1102, "-//PHONE.COM//DTD SIGNAL 1.0//EN" },
{ 0x1103, "-//PHONE.COM//DTD LIST 1.0//EN" },
{ 0x1104, "-//PHONE.COM//DTD LISTCMD 1.0//EN" },
{ 0x1105, "-//PHONE.COM//DTD CHANNEL 1.0//EN" },
{ 0x1106, "-//PHONE.COM//DTD MMC 1.0//EN" },
{ 0x1107, "-//PHONE.COM//DTD BEARER-CHOICE 1.0//EN" },
{ 0x1108, "-//PHONE.COM//DTD WML 1.1//EN (WML+ 1.1)" },
{ 0x1109, "-//PHONE.COM//DTD CHANNEL 1.1//EN" },
{ 0x110a, "-//PHONE.COM//DTD LIST 1.1//EN" },
{ 0x110b, "-//PHONE.COM//DTD LISTCMD 1.1//EN" },
{ 0x110c, "-//PHONE.COM//DTD MMC 1.1//EN" },
{ 0x110d, "-//PHONE.COM//DTD WML 1.3//EN (WML+ 1.3)" },
{ 0x110e, "-//PHONE.COM//DTD MMC 2.0//EN" },
/* 0x110F -- 0x11FF: unassigned */
{ 0x1200, "-//3GPP2.COM//DTD IOTA 1.0//EN" },
{ 0x1201, "-//SYNCML//DTD SyncML 1.2//EN" },
{ 0x1202, "-//SYNCML//DTD MetaInf 1.2//EN" },
{ 0x1203, "-//SYNCML//DTD DevInf 1.2//EN" },
{ 0x1204, "-//NOKIA//DTD LANDMARKS 1.0//EN" },
{ 0x00, NULL }
};
static value_string_ext vals_wbxml_public_ids_ext = VALUE_STRING_EXT_INIT(vals_wbxml_public_ids);
static const value_string vals_wbxml_versions[] = {
{ 0x00, "1.0" }, /* WAP-104-WBXML */
{ 0x01, "1.1" }, /* WAP-135-WBXML */
{ 0x02, "1.2" }, /* WAP-154-WBXML */
{ 0x03, "1.3" }, /* WAP-192-WBXML */
{ 0x00, NULL }
};
static value_string_ext vals_wbxml_versions_ext = VALUE_STRING_EXT_INIT(vals_wbxml_versions);
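/* The WBXML version octet packs (major - 1) into the high nibble and the
 * minor version into the low nibble, so 0x03 above decodes to "1.3". A
 * minimal sketch of that rule (the dissector only needs the table above): */
static inline guint8 example_wbxml_major(guint8 v) { return (guint8)((v >> 4) + 1); }
static inline guint8 example_wbxml_minor(guint8 v) { return (guint8)(v & 0x0F); }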
/* WBXML 1.0 global tokens: WAP-104-WBXML
* Same token mapping as in vals_wbxml1x_global_tokens, but:
* { 0xC3, "RESERVED_2" }
*/
/* WBXML 1.x (x>0) global tokens: WAP-135-WBXML, WAP-154-WBXML, WAP-192-WBXML
*/
static const value_string vals_wbxml1x_global_tokens[] = {
{ 0x00, "SWITCH_PAGE" },
{ 0x01, "END" },
{ 0x02, "ENTITY" },
{ 0x03, "STR_I" },
{ 0x04, "LITERAL" },
{ 0x40, "EXT_I_0" },
{ 0x41, "EXT_I_1" },
{ 0x42, "EXT_I_2" },
{ 0x43, "PI" },
{ 0x44, "LITERAL_C" },
{ 0x80, "EXT_T_0" },
{ 0x81, "EXT_T_1" },
{ 0x82, "EXT_T_2" },
{ 0x83, "STR_T" },
{ 0x84, "LITERAL_A" },
{ 0xC0, "EXT_0" },
{ 0xC1, "EXT_1" },
{ 0xC2, "EXT_2" },
{ 0xC3, "OPAQUE" },
{ 0xC4, "LITERAL_AC" },
{ 0x00, NULL }
};
static value_string_ext vals_wbxml1x_global_tokens_ext = VALUE_STRING_EXT_INIT(vals_wbxml1x_global_tokens);
/********************** WBXML token mapping definition **********************/
/*
* NOTE: Please make sure the Attribute Start values all contain an equal sign
* even in cases where they do not contain the start of an Attribute
* Value.
*/
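/* Illustrative only: because every Attribute Start string contains an '=',
 * display code can split token text into attribute name and optional value
 * prefix in one uniform way. strchr() is assumed available via <string.h>. */
static const char *
example_attr_value_prefix(const char *attr_start)
{
    const char *eq = strchr(attr_start, '=');
    /* Return the value prefix following '=', or NULL when nothing follows */
    return (eq && eq[1] != '\0') ? eq + 1 : NULL;
}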
/* WML 1.0
*
* Wireless Markup Language
***************************************/
static char *
ext_t_0_wml_10(tvbuff_t *tvb, guint32 value, guint32 str_tbl)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Variable substitution - escaped: '%s'",
tvb_get_const_stringz(tvb, str_tbl + value, NULL));
return str;
}
static char *
ext_t_1_wml_10(tvbuff_t *tvb, guint32 value, guint32 str_tbl)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Variable substitution - unescaped: '%s'",
tvb_get_const_stringz(tvb, str_tbl + value, NULL));
return str;
}
static char *
ext_t_2_wml_10(tvbuff_t *tvb, guint32 value, guint32 str_tbl)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Variable substitution - no transformation: '%s'",
tvb_get_const_stringz(tvb, str_tbl + value, NULL));
return str;
}
/***** Global extension tokens *****/
static const value_string wbxml_wmlc10_global_cp0[] = {
{ 0x40, "Variable substitution - escaped" },
{ 0x41, "Variable substitution - unescaped" },
{ 0x42, "Variable substitution - no transformation" },
{ 0x80, "Variable substitution - escaped" },
{ 0x81, "Variable substitution - unescaped" },
{ 0x82, "Variable substitution - no transformation" },
{ 0xC0, "Reserved" },
{ 0xC1, "Reserved" },
{ 0xC2, "Reserved" },
{ 0x00, NULL }
};
/***** Tag tokens *****/
static const value_string wbxml_wmlc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
/* 0x05 -- 0x21 */
{ 0x22, "A" },
{ 0x23, "ACCESS" },
{ 0x24, "B" },
{ 0x25, "BIG" },
{ 0x26, "BR" },
{ 0x27, "CARD" },
{ 0x28, "DO" },
{ 0x29, "EM" },
{ 0x2A, "FIELDSET" },
{ 0x2B, "GO" },
{ 0x2C, "HEAD" },
{ 0x2D, "I" },
{ 0x2E, "IMG" },
{ 0x2F, "INPUT" },
{ 0x30, "META" },
{ 0x31, "NOOP" },
{ 0x32, "PREV" },
{ 0x33, "ONEVENT" },
{ 0x34, "OPTGROUP" },
{ 0x35, "OPTION" },
{ 0x36, "REFRESH" },
{ 0x37, "SELECT" },
{ 0x38, "SMALL" },
{ 0x39, "STRONG" },
{ 0x3A, "TAB" },
{ 0x3B, "TEMPLATE" },
{ 0x3C, "TIMER" },
{ 0x3D, "U" },
{ 0x3E, "VAR" },
{ 0x3F, "WML" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_wmlc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ACCEPT-CHARSET=" },
{ 0x06, "ALIGN='BOTTOM'" },
{ 0x07, "ALIGN='CENTER'" },
{ 0x08, "ALIGN='LEFT'" },
{ 0x09, "ALIGN='MIDDLE'" },
{ 0x0A, "ALIGN='RIGHT'" },
{ 0x0B, "ALIGN='TOP'" },
{ 0x0C, "ALT=" },
{ 0x0D, "CONTENT=" },
{ 0x0E, "DEFAULT=" },
{ 0x0F, "DOMAIN=" },
{ 0x10, "EMPTYOK='FALSE'" },
{ 0x11, "EMPTYOK='TRUE'" },
{ 0x12, "FORMAT=" },
{ 0x13, "HEIGHT=" },
{ 0x14, "HSPACE=" },
{ 0x15, "IDEFAULT=" },
{ 0x16, "IKEY=" },
{ 0x17, "KEY=" },
{ 0x18, "LABEL=" },
{ 0x19, "LOCALSRC=" },
{ 0x1A, "MAXLENGTH=" },
{ 0x1B, "METHOD='GET'" },
{ 0x1C, "METHOD='POST'" },
{ 0x1D, "MODE='NOWRAP'" },
{ 0x1E, "MODE='WRAP'" },
{ 0x1F, "MULTIPLE='FALSE'" },
{ 0x20, "MULTIPLE='TRUE'" },
{ 0x21, "NAME=" },
{ 0x22, "NEWCONTEXT='FALSE'" },
{ 0x23, "NEWCONTEXT='TRUE'" },
{ 0x24, "ONCLICK=" },
{ 0x25, "ONENTERBACKWARD=" },
{ 0x26, "ONENTERFORWARD=" },
{ 0x27, "ONTIMER=" },
{ 0x28, "OPTIONAL='FALSE'" },
{ 0x29, "OPTIONAL='TRUE'" },
{ 0x2A, "PATH=" },
{ 0x2B, "POSTDATA=" },
{ 0x2C, "PUBLIC='FALSE'" },
{ 0x2D, "PUBLIC='TRUE'" },
{ 0x2E, "SCHEME=" },
{ 0x2F, "SENDREFERER='FALSE'" },
{ 0x30, "SENDREFERER='TRUE'" },
{ 0x31, "SIZE=" },
{ 0x32, "SRC=" },
{ 0x33, "STYLE='LIST'" },
{ 0x34, "STYLE='SET'" },
{ 0x35, "TABINDEX=" },
{ 0x36, "TITLE=" },
{ 0x37, "TYPE=" },
{ 0x38, "TYPE='ACCEPT'" },
{ 0x39, "TYPE='DELETE'" },
{ 0x3A, "TYPE='HELP'" },
{ 0x3B, "TYPE='PASSWORD'" },
{ 0x3C, "TYPE='ONCLICK'" },
{ 0x3D, "TYPE='ONENTERBACKWARD'" },
{ 0x3E, "TYPE='ONENTERFORWARD'" },
{ 0x3F, "TYPE='ONTIMER'" },
/* 0x40 -- 0x44 GLOBAL */
{ 0x45, "TYPE='OPTIONS'" },
{ 0x46, "TYPE='PREV'" },
{ 0x47, "TYPE='RESET'" },
{ 0x48, "TYPE='TEXT'" },
{ 0x49, "TYPE='vnd.'" },
{ 0x4A, "URL=" },
{ 0x4B, "URL='http://'" },
{ 0x4C, "URL='https://'" },
{ 0x4D, "USER-AGENT=" },
{ 0x4E, "VALUE=" },
{ 0x4F, "VSPACE=" },
{ 0x50, "WIDTH=" },
{ 0x51, "xml:lang=" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
static const value_string wbxml_wmlc10_attrValue_cp0[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "'.com/'" },
{ 0x86, "'.edu/'" },
{ 0x87, "'.net/'" },
{ 0x88, "'.org/'" },
{ 0x89, "'ACCEPT'" },
{ 0x8A, "'BOTTOM'" },
{ 0x8B, "'CLEAR'" },
{ 0x8C, "'DELETE'" },
{ 0x8D, "'HELP'" },
{ 0x8E, "'http://'" },
{ 0x8F, "'http://www.'" },
{ 0x90, "'https://'" },
{ 0x91, "'https://www.'" },
{ 0x92, "'LIST'" },
{ 0x93, "'MIDDLE'" },
{ 0x94, "'NOWRAP'" },
{ 0x95, "'ONCLICK'" },
{ 0x96, "'ONENTERBACKWARD'" },
{ 0x97, "'ONENTERFORWARD'" },
{ 0x98, "'ONTIMER'" },
{ 0x99, "'OPTIONS'" },
{ 0x9A, "'PASSWORD'" },
{ 0x9B, "'RESET'" },
{ 0x9C, "'SET'" },
{ 0x9D, "'TEXT'" },
{ 0x9E, "'TOP'" },
{ 0x9F, "'UNKNOWN'" },
{ 0xA0, "'WRAP'" },
{ 0xA1, "'www.'" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_wmlc10_global[] = {
{ 0, wbxml_wmlc10_global_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc10_tags[] = {
{ 0, wbxml_wmlc10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc10_attrStart[] = {
{ 0, wbxml_wmlc10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc10_attrValue[] = {
{ 0, wbxml_wmlc10_attrValue_cp0 },
{ 0, NULL }
};
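/* Illustrative only: resolving a token name through a code-page aggregation
 * such as wbxml_wmlc10_tags above. This assumes the value/valstrptr members
 * of value_valuestring declared earlier in this file and try_val_to_str()
 * from epan/value_string.h. */
static const char *
example_token_name(const value_valuestring *vvs, guint32 codepage, guint32 token)
{
    for ( ; vvs->valstrptr; vvs++) {
        if (vvs->value == codepage)
            return try_val_to_str(token, vvs->valstrptr);
    }
    return NULL; /* unknown code page or token */
}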
static const wbxml_decoding decode_wmlc_10 = {
"Wireless Markup Language 1.0",
"WML 1.0",
{ ext_t_0_wml_10, ext_t_1_wml_10, ext_t_2_wml_10 },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wmlc10_global,
wbxml_wmlc10_tags,
wbxml_wmlc10_attrStart,
wbxml_wmlc10_attrValue
};
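/* Illustrative only: a discriminator_func_ptr (see the typedef above) peeks
 * at the body content to select a decoding. The check below - masking off
 * the content (0x40) and attribute (0x80) flags and testing for the WML
 * root tag token 0x3F - is an assumption chosen purely for illustration. */
static const wbxml_decoding *
example_wml_discriminator(tvbuff_t *tvb, guint32 cont_start)
{
    guint8 tok = tvb_get_guint8(tvb, cont_start) & 0x3F;
    return (tok == 0x3F) ? &decode_wmlc_10 : NULL; /* NULL: use default map */
}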
/* WML 1.1
*
* Wireless Markup Language
***************************************/
/***** Global extension tokens *****/
/* Same as in WML 1.0 */
/***** Tag tokens *****/
static const value_string wbxml_wmlc11_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
/* 0x05 -- 0x1B */
{ 0x1C, "a" },
{ 0x1D, "td" },
{ 0x1E, "tr" },
{ 0x1F, "table" },
{ 0x20, "p" },
{ 0x21, "postfield" },
{ 0x22, "anchor" },
{ 0x23, "access" },
{ 0x24, "b" },
{ 0x25, "big" },
{ 0x26, "br" },
{ 0x27, "card" },
{ 0x28, "do" },
{ 0x29, "em" },
{ 0x2A, "fieldset" },
{ 0x2B, "go" },
{ 0x2C, "head" },
{ 0x2D, "i" },
{ 0x2E, "img" },
{ 0x2F, "input" },
{ 0x30, "meta" },
{ 0x31, "noop" },
{ 0x32, "prev" },
{ 0x33, "onevent" },
{ 0x34, "optgroup" },
{ 0x35, "option" },
{ 0x36, "refresh" },
{ 0x37, "select" },
{ 0x38, "small" },
{ 0x39, "strong" },
/* 0x3A */
{ 0x3B, "template" },
{ 0x3C, "timer" },
{ 0x3D, "u" },
{ 0x3E, "setvar" },
{ 0x3F, "wml" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_wmlc11_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "accept-charset=" },
{ 0x06, "align='bottom'" },
{ 0x07, "align='center'" },
{ 0x08, "align='left'" },
{ 0x09, "align='middle'" },
{ 0x0A, "align='right'" },
{ 0x0B, "align='top'" },
{ 0x0C, "alt=" },
{ 0x0D, "content=" },
/* 0x0E */
{ 0x0F, "domain=" },
{ 0x10, "emptyok='false'" },
{ 0x11, "emptyok='true'" },
{ 0x12, "format=" },
{ 0x13, "height=" },
{ 0x14, "hspace=" },
{ 0x15, "ivalue=" },
{ 0x16, "iname=" },
/* 0x17 */
{ 0x18, "label=" },
{ 0x19, "localsrc=" },
{ 0x1A, "maxlength=" },
{ 0x1B, "method='get'" },
{ 0x1C, "method='post'" },
{ 0x1D, "mode='nowrap'" },
{ 0x1E, "mode='wrap'" },
{ 0x1F, "multiple='false'" },
{ 0x20, "multiple='true'" },
{ 0x21, "name=" },
{ 0x22, "newcontext='false'" },
{ 0x23, "newcontext='true'" },
{ 0x24, "onpick=" },
{ 0x25, "onenterbackward=" },
{ 0x26, "onenterforward=" },
{ 0x27, "ontimer=" },
{ 0x28, "optional='false'" },
{ 0x29, "optional='true'" },
{ 0x2A, "path=" },
/* 0x2B -- 0x2D */
{ 0x2E, "scheme=" },
{ 0x2F, "sendreferer='false'" },
{ 0x30, "sendreferer='true'" },
{ 0x31, "size=" },
{ 0x32, "src=" },
{ 0x33, "ordered='false'" },
{ 0x34, "ordered='true'" },
{ 0x35, "tabindex=" },
{ 0x36, "title=" },
{ 0x37, "type=" },
{ 0x38, "type='accept'" },
{ 0x39, "type='delete'" },
{ 0x3A, "type='help'" },
{ 0x3B, "type='password'" },
{ 0x3C, "type='onpick'" },
{ 0x3D, "type='onenterbackward'" },
{ 0x3E, "type='onenterforward'" },
{ 0x3F, "type='ontimer'" },
/* 0x40 -- 0x44 GLOBAL */
{ 0x45, "type='options'" },
{ 0x46, "type='prev'" },
{ 0x47, "type='reset'" },
{ 0x48, "type='text'" },
{ 0x49, "type='vnd.'" },
{ 0x4A, "href=" },
{ 0x4B, "href='http://'" },
{ 0x4C, "href='https://'" },
{ 0x4D, "value=" },
{ 0x4E, "vspace=" },
{ 0x4F, "width=" },
{ 0x50, "xml:lang=" },
/* 0x51 */
{ 0x52, "align=" },
{ 0x53, "columns=" },
{ 0x54, "class=" },
{ 0x55, "id=" },
{ 0x56, "forua='false'" },
{ 0x57, "forua='true'" },
{ 0x58, "src='http://'" },
{ 0x59, "src='https://'" },
{ 0x5A, "http-equiv=" },
{ 0x5B, "http-equiv='Content-Type'" },
{ 0x5C, "content='application/vnd.wap.wmlc;charset='" },
{ 0x5D, "http-equiv='Expires'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
static const value_string wbxml_wmlc11_attrValue_cp0[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "'.com/'" },
{ 0x86, "'.edu/'" },
{ 0x87, "'.net/'" },
{ 0x88, "'.org/'" },
{ 0x89, "'accept'" },
{ 0x8A, "'bottom'" },
{ 0x8B, "'clear'" },
{ 0x8C, "'delete'" },
{ 0x8D, "'help'" },
{ 0x8E, "'http://'" },
{ 0x8F, "'http://www.'" },
{ 0x90, "'https://'" },
{ 0x91, "'https://www.'" },
/* 0x92 */
{ 0x93, "'middle'" },
{ 0x94, "'nowrap'" },
{ 0x95, "'onpick'" },
{ 0x96, "'onenterbackward'" },
{ 0x97, "'onenterforward'" },
{ 0x98, "'ontimer'" },
{ 0x99, "'options'" },
{ 0x9A, "'password'" },
{ 0x9B, "'reset'" },
/* 0x9C */
{ 0x9D, "'text'" },
{ 0x9E, "'top'" },
{ 0x9F, "'unknown'" },
{ 0xA0, "'wrap'" },
{ 0xA1, "'www.'" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_wmlc11_global[] = {
{ 0, wbxml_wmlc10_global_cp0 }, /* Same as WML 1.0 */
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc11_tags[] = {
{ 0, wbxml_wmlc11_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc11_attrStart[] = {
{ 0, wbxml_wmlc11_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc11_attrValue[] = {
{ 0, wbxml_wmlc11_attrValue_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_wmlc_11 = {
"Wireless Markup Language 1.1",
"WML 1.1",
{ ext_t_0_wml_10, ext_t_1_wml_10, ext_t_2_wml_10 },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wmlc11_global,
wbxml_wmlc11_tags,
wbxml_wmlc11_attrStart,
wbxml_wmlc11_attrValue
};
/* WML 1.2
*
* Wireless Markup Language
***************************************/
/***** Global extension tokens *****/
/* Same as in WML 1.0 */
/***** Tag tokens *****/
static const value_string wbxml_wmlc12_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
/* 0x05 -- 0x1A */
{ 0x1B, "pre" },
{ 0x1C, "a" },
{ 0x1D, "td" },
{ 0x1E, "tr" },
{ 0x1F, "table" },
{ 0x20, "p" },
{ 0x21, "postfield" },
{ 0x22, "anchor" },
{ 0x23, "access" },
{ 0x24, "b" },
{ 0x25, "big" },
{ 0x26, "br" },
{ 0x27, "card" },
{ 0x28, "do" },
{ 0x29, "em" },
{ 0x2A, "fieldset" },
{ 0x2B, "go" },
{ 0x2C, "head" },
{ 0x2D, "i" },
{ 0x2E, "img" },
{ 0x2F, "input" },
{ 0x30, "meta" },
{ 0x31, "noop" },
{ 0x32, "prev" },
{ 0x33, "onevent" },
{ 0x34, "optgroup" },
{ 0x35, "option" },
{ 0x36, "refresh" },
{ 0x37, "select" },
{ 0x38, "small" },
{ 0x39, "strong" },
/* 0x3A */
{ 0x3B, "template" },
{ 0x3C, "timer" },
{ 0x3D, "u" },
{ 0x3E, "setvar" },
{ 0x3F, "wml" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_wmlc12_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "accept-charset=" },
{ 0x06, "align='bottom'" },
{ 0x07, "align='center'" },
{ 0x08, "align='left'" },
{ 0x09, "align='middle'" },
{ 0x0A, "align='right'" },
{ 0x0B, "align='top'" },
{ 0x0C, "alt=" },
{ 0x0D, "content=" },
/* 0x0E */
{ 0x0F, "domain=" },
{ 0x10, "emptyok='false'" },
{ 0x11, "emptyok='true'" },
{ 0x12, "format=" },
{ 0x13, "height=" },
{ 0x14, "hspace=" },
{ 0x15, "ivalue=" },
{ 0x16, "iname=" },
/* 0x17 */
{ 0x18, "label=" },
{ 0x19, "localsrc=" },
{ 0x1A, "maxlength=" },
{ 0x1B, "method='get'" },
{ 0x1C, "method='post'" },
{ 0x1D, "mode='nowrap'" },
{ 0x1E, "mode='wrap'" },
{ 0x1F, "multiple='false'" },
{ 0x20, "multiple='true'" },
{ 0x21, "name=" },
{ 0x22, "newcontext='false'" },
{ 0x23, "newcontext='true'" },
{ 0x24, "onpick=" },
{ 0x25, "onenterbackward=" },
{ 0x26, "onenterforward=" },
{ 0x27, "ontimer=" },
{ 0x28, "optional='false'" },
{ 0x29, "optional='true'" },
{ 0x2A, "path=" },
/* 0x2B -- 0x2D */
{ 0x2E, "scheme=" },
{ 0x2F, "sendreferer='false'" },
{ 0x30, "sendreferer='true'" },
{ 0x31, "size=" },
{ 0x32, "src=" },
{ 0x33, "ordered='false'" },
{ 0x34, "ordered='true'" },
{ 0x35, "tabindex=" },
{ 0x36, "title=" },
{ 0x37, "type=" },
{ 0x38, "type='accept'" },
{ 0x39, "type='delete'" },
{ 0x3A, "type='help'" },
{ 0x3B, "type='password'" },
{ 0x3C, "type='onpick'" },
{ 0x3D, "type='onenterbackward'" },
{ 0x3E, "type='onenterforward'" },
{ 0x3F, "type='ontimer'" },
/* 0x40 -- 0x44 GLOBAL */
{ 0x45, "type='options'" },
{ 0x46, "type='prev'" },
{ 0x47, "type='reset'" },
{ 0x48, "type='text'" },
{ 0x49, "type='vnd.'" },
{ 0x4A, "href=" },
{ 0x4B, "href='http://'" },
{ 0x4C, "href='https://'" },
{ 0x4D, "value=" },
{ 0x4E, "vspace=" },
{ 0x4F, "width=" },
{ 0x50, "xml:lang=" },
/* 0x51 */
{ 0x52, "align=" },
{ 0x53, "columns=" },
{ 0x54, "class=" },
{ 0x55, "id=" },
{ 0x56, "forua='false'" },
{ 0x57, "forua='true'" },
{ 0x58, "src='http://'" },
{ 0x59, "src='https://'" },
{ 0x5A, "http-equiv=" },
{ 0x5B, "http-equiv='Content-Type'" },
{ 0x5C, "content='application/vnd.wap.wmlc;charset='" },
{ 0x5D, "http-equiv='Expires'" },
{ 0x5E, "accesskey=" },
{ 0x5F, "enctype=" },
{ 0x60, "enctype='application/x-www-form-urlencoded'" },
{ 0x61, "enctype='multipart/form-data'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Same as in WML 1.1 */
/***** Token code page aggregation *****/
static const value_valuestring wbxml_wmlc12_global[] = {
{ 0, wbxml_wmlc10_global_cp0 }, /* Same as WML 1.0 */
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc12_tags[] = {
{ 0, wbxml_wmlc12_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc12_attrStart[] = {
{ 0, wbxml_wmlc12_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc12_attrValue[] = {
{ 0, wbxml_wmlc11_attrValue_cp0 }, /* Same as WML 1.1 */
{ 0, NULL }
};
static const wbxml_decoding decode_wmlc_12 = {
"Wireless Markup Language 1.2",
"WML 1.2",
{ ext_t_0_wml_10, ext_t_1_wml_10, ext_t_2_wml_10 },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wmlc12_global,
wbxml_wmlc12_tags,
wbxml_wmlc12_attrStart,
wbxml_wmlc12_attrValue
};
/* WML 1.3
*
* Wireless Markup Language
***************************************/
/***** Global extension tokens *****/
/* Same as in WML 1.0 */
/***** Tag tokens *****/
/* Same as in WML 1.2 */
/***** Attribute Start tokens *****/
static const value_string wbxml_wmlc13_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "accept-charset=" },
{ 0x06, "align='bottom'" },
{ 0x07, "align='center'" },
{ 0x08, "align='left'" },
{ 0x09, "align='middle'" },
{ 0x0A, "align='right'" },
{ 0x0B, "align='top'" },
{ 0x0C, "alt=" },
{ 0x0D, "content=" },
/* 0x0E */
{ 0x0F, "domain=" },
{ 0x10, "emptyok='false'" },
{ 0x11, "emptyok='true'" },
{ 0x12, "format=" },
{ 0x13, "height=" },
{ 0x14, "hspace=" },
{ 0x15, "ivalue=" },
{ 0x16, "iname=" },
/* 0x17 */
{ 0x18, "label=" },
{ 0x19, "localsrc=" },
{ 0x1A, "maxlength=" },
{ 0x1B, "method='get'" },
{ 0x1C, "method='post'" },
{ 0x1D, "mode='nowrap'" },
{ 0x1E, "mode='wrap'" },
{ 0x1F, "multiple='false'" },
{ 0x20, "multiple='true'" },
{ 0x21, "name=" },
{ 0x22, "newcontext='false'" },
{ 0x23, "newcontext='true'" },
{ 0x24, "onpick=" },
{ 0x25, "onenterbackward=" },
{ 0x26, "onenterforward=" },
{ 0x27, "ontimer=" },
{ 0x28, "optional='false'" },
{ 0x29, "optional='true'" },
{ 0x2A, "path=" },
/* 0x2B -- 0x2D */
{ 0x2E, "scheme=" },
{ 0x2F, "sendreferer='false'" },
{ 0x30, "sendreferer='true'" },
{ 0x31, "size=" },
{ 0x32, "src=" },
{ 0x33, "ordered='false'" },
{ 0x34, "ordered='true'" },
{ 0x35, "tabindex=" },
{ 0x36, "title=" },
{ 0x37, "type=" },
{ 0x38, "type='accept'" },
{ 0x39, "type='delete'" },
{ 0x3A, "type='help'" },
{ 0x3B, "type='password'" },
{ 0x3C, "type='onpick'" },
{ 0x3D, "type='onenterbackward'" },
{ 0x3E, "type='onenterforward'" },
{ 0x3F, "type='ontimer'" },
/* 0x40 -- 0x44 GLOBAL */
{ 0x45, "type='options'" },
{ 0x46, "type='prev'" },
{ 0x47, "type='reset'" },
{ 0x48, "type='text'" },
{ 0x49, "type='vnd.'" },
{ 0x4A, "href=" },
{ 0x4B, "href='http://'" },
{ 0x4C, "href='https://'" },
{ 0x4D, "value=" },
{ 0x4E, "vspace=" },
{ 0x4F, "width=" },
{ 0x50, "xml:lang=" },
/* 0x51 */
{ 0x52, "align=" },
{ 0x53, "columns=" },
{ 0x54, "class=" },
{ 0x55, "id=" },
{ 0x56, "forua='false'" },
{ 0x57, "forua='true'" },
{ 0x58, "src='http://'" },
{ 0x59, "src='https://'" },
{ 0x5A, "http-equiv=" },
{ 0x5B, "http-equiv='Content-Type'" },
{ 0x5C, "content='application/vnd.wap.wmlc;charset='" },
{ 0x5D, "http-equiv='Expires'" },
{ 0x5E, "accesskey=" },
{ 0x5F, "enctype=" },
{ 0x60, "enctype='application/x-www-form-urlencoded'" },
{ 0x61, "enctype='multipart/form-data'" },
{ 0x62, "xml:space='preserve'" },
{ 0x63, "xml:space='default'" },
{ 0x64, "cache-control='no-cache'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Same as in WML 1.1 */
/***** Token code page aggregation *****/
static const value_valuestring wbxml_wmlc13_global[] = {
{ 0, wbxml_wmlc10_global_cp0 }, /* Same as WML 1.0 */
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc13_tags[] = {
{ 0, wbxml_wmlc12_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc13_attrStart[] = {
{ 0, wbxml_wmlc13_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wmlc13_attrValue[] = {
{ 0, wbxml_wmlc11_attrValue_cp0 }, /* Same as WML 1.1 */
{ 0, NULL }
};
static const wbxml_decoding decode_wmlc_13 = {
"Wireless Markup Language 1.3",
"WML 1.3",
{ ext_t_0_wml_10, ext_t_1_wml_10, ext_t_2_wml_10 },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wmlc13_global,
wbxml_wmlc13_tags,
wbxml_wmlc13_attrStart,
wbxml_wmlc13_attrValue
};
/* SI 1.0
*
* Service Indication
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_sic10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "si" },
{ 0x06, "indication" },
{ 0x07, "info" },
{ 0x08, "item" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_sic10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "action='signal-none'" },
{ 0x06, "action='signal-low'" },
{ 0x07, "action='signal-medium'" },
{ 0x08, "action='signal-high'" },
{ 0x09, "action='delete'" },
{ 0x0a, "created=" },
{ 0x0b, "href=" },
{ 0x0c, "href='http://'" },
{ 0x0d, "href='http://www.'" },
{ 0x0e, "href='https://'" },
{ 0x0f, "href='https://www.'" },
{ 0x10, "si-expires=" },
{ 0x11, "si-id=" },
{ 0x12, "class=" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
static const value_string wbxml_sic10_attrValue_cp0[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "'.com/'" },
{ 0x86, "'.edu/'" },
{ 0x87, "'.net/'" },
{ 0x88, "'.org/'" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_sic10_tags[] = {
{ 0, wbxml_sic10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_sic10_attrStart[] = {
{ 0, wbxml_sic10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_sic10_attrValue[] = {
{ 0, wbxml_sic10_attrValue_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_sic_10 = {
"Service Indication 1.0",
"SI 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
sic10_opaque_binary_attr,
sic10_opaque_literal_attr,
NULL,
wbxml_sic10_tags,
wbxml_sic10_attrStart,
wbxml_sic10_attrValue
};
/* SL 1.0
*
* Service Loading
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_slc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "sl" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_slc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "action='execute-low'" },
{ 0x06, "action='execute-high'" },
{ 0x07, "action='cache'" },
{ 0x08, "href=" },
{ 0x09, "href='http://'" },
{ 0x0a, "href='http://www.'" },
{ 0x0b, "href='https://'" },
{ 0x0c, "href='https://www.'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Same as in SI 1.0 */
/***** Token code page aggregation *****/
static const value_valuestring wbxml_slc10_tags[] = {
{ 0, wbxml_slc10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_slc10_attrStart[] = {
{ 0, wbxml_slc10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_slc10_attrValue[] = {
{ 0, wbxml_sic10_attrValue_cp0 }, /* Same as SI 1.0 */
{ 0, NULL }
};
static const wbxml_decoding decode_slc_10 = {
"Service Loading 1.0",
"SL 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_slc10_tags,
wbxml_slc10_attrStart,
wbxml_slc10_attrValue
};
/* CO 1.0
*
* Cache Operation
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_coc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "co" },
{ 0x06, "invalidate-object" },
{ 0x07, "invalidate-service" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_coc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "uri=" },
{ 0x06, "uri='http://'" },
{ 0x07, "uri='http://www.'" },
{ 0x08, "uri='https://'" },
{ 0x09, "uri='https://www.'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Same as in SI 1.0 */
/***** Token code page aggregation *****/
static const value_valuestring wbxml_coc10_tags[] = {
{ 0, wbxml_coc10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_coc10_attrStart[] = {
{ 0, wbxml_coc10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_coc10_attrValue[] = {
{ 0, wbxml_sic10_attrValue_cp0 }, /* Same as SI 1.0 */
{ 0, NULL }
};
static const wbxml_decoding decode_coc_10 = {
"Cache Operation 1.0",
"CO 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_coc10_tags,
wbxml_coc10_attrStart,
wbxml_coc10_attrValue
};
/* PROV 1.0
*
* Client Provisioning
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_provc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "wap-provisioningdoc" },
{ 0x06, "characteristic" },
{ 0x07, "parm" },
{ 0x00, NULL }
};
static const value_string wbxml_provc10_tags_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
/* 0x05 */
{ 0x06, "characteristic" },
{ 0x07, "parm" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_provc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "name=" },
{ 0x06, "value=" },
{ 0x07, "name='NAME'" },
{ 0x08, "name='NAP-ADDRESS'" },
{ 0x09, "name='NAP-ADDRTYPE'" },
{ 0x0A, "name='CALLTYPE'" },
{ 0x0B, "name='VALIDUNTIL'" },
{ 0x0C, "name='AUTHTYPE'" },
{ 0x0D, "name='AUTHNAME'" },
{ 0x0E, "name='AUTHSECRET'" },
{ 0x0F, "name='LINGER'" },
{ 0x10, "name='BEARER'" },
{ 0x11, "name='NAPID'" },
{ 0x12, "name='COUNTRY'" },
{ 0x13, "name='NETWORK'" },
{ 0x14, "name='INTERNET'" },
{ 0x15, "name='PROXY-ID'" },
{ 0x16, "name='PROXY-PROVIDER-ID'" },
{ 0x17, "name='DOMAIN'" },
{ 0x18, "name='PROVURL'" },
{ 0x19, "name='PXAUTH-TYPE'" },
{ 0x1A, "name='PXAUTH-ID'" },
{ 0x1B, "name='PXAUTH-PW'" },
{ 0x1C, "name='STARTPAGE'" },
{ 0x1D, "name='BASAUTH-ID'" },
{ 0x1E, "name='BASAUTH-PW'" },
{ 0x1F, "name='PUSHENABLED'" },
{ 0x20, "name='PXADDR'" },
{ 0x21, "name='PXADDRTYPE'" },
{ 0x22, "name='TO-NAPID'" },
{ 0x23, "name='PORTNBR'" },
{ 0x24, "name='SERVICE'" },
{ 0x25, "name='LINKSPEED'" },
{ 0x26, "name='DNLINKSPEED'" },
{ 0x27, "name='LOCAL-ADDR'" },
{ 0x28, "name='LOCAL-ADDRTYPE'" },
{ 0x29, "name='CONTEXT-ALLOW'" },
{ 0x2A, "name='TRUST'" },
{ 0x2B, "name='MASTER'" },
{ 0x2C, "name='SID'" },
{ 0x2D, "name='SOC'" },
{ 0x2E, "name='WSP-VERSION'" },
{ 0x2F, "name='PHYSICAL-PROXY-ID'" },
{ 0x30, "name='CLIENT-ID'" },
{ 0x31, "name='DELIVERY-ERR-SDU'" },
{ 0x32, "name='DELIVERY-ORDER'" },
{ 0x33, "name='TRAFFIC-CLASS'" },
{ 0x34, "name='MAX-SDU-SIZE'" },
{ 0x35, "name='MAX-BITRATE-UPLINK'" },
{ 0x36, "name='MAX-BITRATE-DNLINK'" },
{ 0x37, "name='RESIDUAL-BER'" },
{ 0x38, "name='SDU-ERROR-RATIO'" },
{ 0x39, "name='TRAFFIC-HANDL-PRIO'" },
{ 0x3A, "name='TRANSFER-DELAY'" },
{ 0x3B, "name='GUARANTEED-BITRATE-UPLINK'" },
{ 0x3C, "name='GUARANTEED-BITRATE-DNLINK'" },
{ 0x3D, "name='PXADDR-FQDN'" },
{ 0x3E, "name='PROXY-PW'" },
{ 0x3F, "name='PPGAUTH-TYPE'" },
/* 0x40 -- 0x44 GLOBAL */
{ 0x45, "version=" },
{ 0x46, "version='1.0'" },
{ 0x47, "name='PULLENABLED'" },
{ 0x48, "name='DNS-ADDR'" },
{ 0x49, "name='MAX-NUM-RETRY'" },
{ 0x4A, "name='FIRST-RETRY-TIMEOUT'" },
{ 0x4B, "name='REREG-THRESHOLD'" },
{ 0x4C, "name='T-BIT'" },
/* 0x4D */
{ 0x4E, "name='AUTH-ENTITY'" },
{ 0x4F, "name='SPI'" },
{ 0x50, "type=" },
{ 0x51, "type='PXLOGICAL'" },
{ 0x52, "type='PXPHYSICAL'" },
{ 0x53, "type='PORT'" },
{ 0x54, "type='VALIDITY'" },
{ 0x55, "type='NAPDEF'" },
{ 0x56, "type='BOOTSTRAP'" },
{ 0x57, "type='VENDORCONFIG'" },
{ 0x58, "type='CLIENTIDENTITY'" },
{ 0x59, "type='PXAUTHINFO'" },
{ 0x5A, "type='NAPAUTHINFO'" },
{ 0x5B, "type='ACCESS'" },
{ 0x00, NULL }
};
static const value_string wbxml_provc10_attrStart_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
/* 0x05 -- 0x06 */
{ 0x07, "name='NAME'" },
/* 0x08 -- 0x13 */
{ 0x14, "name='INTERNET'" },
/* 0x15 -- 0x1B */
{ 0x1C, "name='STARTPAGE'" },
/* 0x1D -- 0x21 */
{ 0x22, "name='TO-NAPID'" },
{ 0x23, "name='PORTNBR'" },
{ 0x24, "name='SERVICE'" },
/* 0x25 -- 0x2D */
{ 0x2E, "name='AACCEPT'" },
{ 0x2F, "name='AAUTHDATA'" },
{ 0x30, "name='AAUTHLEVEL'" },
{ 0x31, "name='AAUTHNAME'" },
{ 0x32, "name='AAUTHSECRET'" },
{ 0x33, "name='AAUTHTYPE'" },
{ 0x34, "name='ADDR'" },
{ 0x35, "name='ADDRTYPE'" },
{ 0x36, "name='APPID'" },
{ 0x37, "name='APROTOCOL'" },
{ 0x38, "name='PROVIDER-ID'" },
{ 0x39, "name='TO-PROXY'" },
{ 0x3A, "name='URI'" },
{ 0x3B, "name='RULE'" },
/* 0x3C -- 0x3F */
/* 0x40 -- 0x44 GLOBAL */
/* 0x45 -- 0x4F */
{ 0x50, "type=" },
/* 0x51 -- 0x52 */
{ 0x53, "type='PORT'" },
/* 0x54 */
{ 0x55, "type='APPLICATION'" },
{ 0x56, "type='APPADDR'" },
{ 0x57, "type='APPAUTH'" },
{ 0x58, "type='CLIENTIDENTITY'" },
{ 0x59, "type='RESOURCE'" },
/* 0x5A -- 0x7F */
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
static const value_string wbxml_provc10_attrValue_cp0[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "'IPV4'" },
{ 0x86, "'IPV6'" },
{ 0x87, "'E164'" },
{ 0x88, "'ALPHA'" },
{ 0x89, "'APN'" },
{ 0x8A, "'SCODE'" },
{ 0x8B, "'TETRA-ITSI'" },
{ 0x8C, "'MAN'" },
/* 0x8D -- 0x8F */
{ 0x90, "'ANALOG-MODEM'" },
{ 0x91, "'V.120'" },
{ 0x92, "'V.110'" },
{ 0x93, "'X.31'" },
{ 0x94, "'BIT-TRANSPARENT'" },
{ 0x95, "'DIRECT-ASYNCHRONOUS-DATA-SERVICE'" },
/* 0x96 -- 0x99 */
{ 0x9A, "'PAP'" },
{ 0x9B, "'CHAP'" },
{ 0x9C, "'HTTP-BASIC'" },
{ 0x9D, "'HTTP-DIGEST'" },
{ 0x9E, "'WTLS-SS'" },
{ 0x9F, "'MD5'" },
/* 0xA0 -- 0xA1 */
{ 0xA2, "'GSM-USSD'" },
{ 0xA3, "'GSM-SMS'" },
{ 0xA4, "'ANSI-136-GUTS'" },
{ 0xA5, "'IS-95-CDMA-SMS'" },
{ 0xA6, "'IS-95-CDMA-CSD'" },
{ 0xA7, "'IS-95-CDMA-PACKET'" },
{ 0xA8, "'ANSI-136-CSD'" },
{ 0xA9, "'ANSI-136-GPRS'" },
{ 0xAA, "'GSM-CSD'" },
{ 0xAB, "'GSM-GPRS'" },
{ 0xAC, "'AMPS-CDPD'" },
{ 0xAD, "'PDC-CSD'" },
{ 0xAE, "'PDC-PACKET'" },
{ 0xAF, "'IDEN-SMS'" },
{ 0xB0, "'IDEN-CSD'" },
{ 0xB1, "'IDEN-PACKET'" },
{ 0xB2, "'FLEX/REFLEX'" },
{ 0xB3, "'PHS-SMS'" },
{ 0xB4, "'PHS-CSD'" },
{ 0xB5, "'TETRA-SDS'" },
{ 0xB6, "'TETRA-PACKET'" },
{ 0xB7, "'ANSI-136-GHOST'" },
{ 0xB8, "'MOBITEX-MPAK'" },
{ 0xB9, "'CDMA2000-IX-SIMPLE-IP'" },
{ 0xBA, "'CDMA2000-IX-MOBILE-IP'" },
/* 0xBB -- 0xBF */
/* 0xC0 -- 0xC4 GLOBAL */
{ 0xC5, "'AUTOBAUDING'" },
/* 0xC6 -- 0xC9 */
{ 0xCA, "'CL-WSP'" },
{ 0xCB, "'CO-WSP'" },
{ 0xCC, "'CL-SEC-WSP'" },
{ 0xCD, "'CO-SEC-WSP'" },
{ 0xCE, "'CL-SEC-WTA'" },
{ 0xCF, "'CO-SEC-WTA'" },
{ 0xD0, "'OTA-HTTP-TO'" },
{ 0xD1, "'OTA-HTTP-TLS-TO'" },
{ 0xD2, "'OTA-HTTP-PO'" },
{ 0xD3, "'OTA-HTTP-TLS-PO'" },
/* 0xD4 -- 0xFF */
{ 0x00, NULL }
};
static const value_string wbxml_provc10_attrValue_cp1[] = {
/* 0x80 -- 0x84 GLOBAL */
/* 0x85 */
{ 0x86, "'IPV6'" },
{ 0x87, "'E164'" },
{ 0x88, "'ALPHA'" },
{ 0x8D, "'APPSRV'" },
{ 0x8E, "'OBEX'" },
/* 0x8F */
/* XXX - Errors that require a fix in the OMA/WAP Client Provisioning specs:
{ 0xXXX, "','" },
{ 0xXXX, "'HTTP-'" },
{ 0xXXX, "'BASIC'" },
{ 0xXXX, "'DIGEST'" },
*/
{ 0xE0, "'AAA'" },
{ 0xE1, "'HA'" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_provc10_tags[] = {
{ 0, wbxml_provc10_tags_cp0 },
{ 1, wbxml_provc10_tags_cp1 },
{ 0, NULL }
};
static const value_valuestring wbxml_provc10_attrStart[] = {
{ 0, wbxml_provc10_attrStart_cp0 },
{ 1, wbxml_provc10_attrStart_cp1 },
{ 0, NULL }
};
static const value_valuestring wbxml_provc10_attrValue[] = {
{ 0, wbxml_provc10_attrValue_cp0 },
{ 1, wbxml_provc10_attrValue_cp1 },
{ 0, NULL }
};
static const wbxml_decoding decode_provc_10 = {
"WAP Client Provisioning Document 1.0",
"WAP ProvisioningDoc 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_provc10_tags,
wbxml_provc10_attrStart,
wbxml_provc10_attrValue
};
/* EMN 1.0
*
* Email Notification
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_emnc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "emn" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_emnc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "timestamp=" },
{ 0x06, "mailbox=" },
{ 0x07, "mailbox='mailat:'" },
{ 0x08, "mailbox='pop://'" },
{ 0x09, "mailbox='imap://'" },
{ 0x0a, "mailbox='http://'" },
{ 0x0b, "mailbox='http://www.'" },
{ 0x0c, "mailbox='https://'" },
{ 0x0D, "mailbox='https://www.'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Same as in SI 1.0 */
/***** Token code page aggregation *****/
static const value_valuestring wbxml_emnc10_tags[] = {
{ 0, wbxml_emnc10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_emnc10_attrStart[] = {
{ 0, wbxml_emnc10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_emnc10_attrValue[] = {
{ 0, wbxml_sic10_attrValue_cp0 }, /* Same as SI 1.0 */
{ 0, NULL }
};
static const wbxml_decoding decode_emnc_10 = {
"E-Mail Notification 1.0",
"EMN 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
emnc10_opaque_binary_attr,
emnc10_opaque_literal_attr,
NULL,
wbxml_emnc10_tags,
wbxml_emnc10_attrStart,
wbxml_emnc10_attrValue
};
/* SyncML 1.0
*
* SyncML Representation Protocol
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_syncmlc10_tags_cp0[] = { /* SyncML 1.0 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Add" },
{ 0x06, "Alert" },
{ 0x07, "Archive" },
{ 0x08, "Atomic" },
{ 0x09, "Chal" },
{ 0x0A, "Cmd" },
{ 0x0B, "CmdID" },
{ 0x0C, "CmdRef" },
{ 0x0D, "Copy" },
{ 0x0E, "Cred" },
{ 0x0F, "Data" },
{ 0x10, "Delete" },
{ 0x11, "Exec" },
{ 0x12, "Final" },
{ 0x13, "Get" },
{ 0x14, "Item" },
{ 0x15, "Lang" },
{ 0x16, "LocName" },
{ 0x17, "LocURI" },
{ 0x18, "Map" },
{ 0x19, "MapItem" },
{ 0x1A, "Meta" },
{ 0x1B, "MsgID" },
{ 0x1C, "MsgRef" },
{ 0x1D, "NoResp" },
{ 0x1E, "NoResults" },
{ 0x1F, "Put" },
{ 0x20, "Replace" },
{ 0x21, "RespURI" },
{ 0x22, "Results" },
{ 0x23, "Search" },
{ 0x24, "Sequence" },
{ 0x25, "SessionID" },
{ 0x26, "SftDel" },
{ 0x27, "Source" },
{ 0x28, "SourceRef" },
{ 0x29, "Status" },
{ 0x2A, "Sync" },
{ 0x2B, "SyncBody" },
{ 0x2C, "SyncHdr" },
{ 0x2D, "SyncML" },
{ 0x2E, "Target" },
{ 0x2F, "TargetRef" },
/* 0x30 - Reserved */
{ 0x31, "VerDTD" },
{ 0x32, "VerProto" },
{ 0x00, NULL }
};
static const value_string wbxml_syncmlc10_tags_cp1[] = { /* MetInf 1.0 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Anchor" },
{ 0x06, "EMI" },
{ 0x07, "Format" },
{ 0x08, "FreeID" },
{ 0x09, "FreeMem" },
{ 0x0A, "Last" },
{ 0x0B, "Mark" },
{ 0x0C, "MaxMsgSize" },
{ 0x0D, "Mem" },
{ 0x0E, "MetInf" },
{ 0x0F, "Next" },
{ 0x10, "NextNonce" },
{ 0x11, "SharedMem" },
{ 0x12, "Size" },
{ 0x13, "Type" },
{ 0x14, "Version" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_syncmlc10_tags[] = {
{ 0, wbxml_syncmlc10_tags_cp0 }, /* -//SYNCML//DTD SyncML 1.0//EN */
{ 1, wbxml_syncmlc10_tags_cp1 }, /* -//SYNCML//DTD MetInf 1.0//EN */
{ 0, NULL }
};
static const wbxml_decoding decode_syncmlc_10 = {
"SyncML Representation Protocol 1.0",
"SyncML 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_syncmlc10_tags,
NULL,
NULL
};
/* SyncML 1.1
*
* SyncML Representation Protocol
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_syncmlc11_tags_cp0[] = { /* SyncML 1.1 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Add" },
{ 0x06, "Alert" },
{ 0x07, "Archive" },
{ 0x08, "Atomic" },
{ 0x09, "Chal" },
{ 0x0a, "Cmd" },
{ 0x0b, "CmdID" },
{ 0x0c, "CmdRef" },
{ 0x0d, "Copy" },
{ 0x0e, "Cred" },
{ 0x0f, "Data" },
{ 0x10, "Delete" },
{ 0x11, "Exec" },
{ 0x12, "Final" },
{ 0x13, "Get" },
{ 0x14, "Item" },
{ 0x15, "Lang" },
{ 0x16, "LocName" },
{ 0x17, "LocURI" },
{ 0x18, "Map" },
{ 0x19, "MapItem" },
{ 0x1a, "Meta" },
{ 0x1b, "MsgID" },
{ 0x1c, "MsgRef" },
{ 0x1d, "NoResp" },
{ 0x1e, "NoResults" },
{ 0x1f, "Put" },
{ 0x20, "Replace" },
{ 0x21, "RespURI" },
{ 0x22, "Results" },
{ 0x23, "Search" },
{ 0x24, "Sequence" },
{ 0x25, "SessionID" },
{ 0x26, "SftDel" },
{ 0x27, "Source" },
{ 0x28, "SourceRef" },
{ 0x29, "Status" },
{ 0x2a, "Sync" },
{ 0x2b, "SyncBody" },
{ 0x2c, "SyncHdr" },
{ 0x2d, "SyncML" },
{ 0x2e, "Target" },
{ 0x2f, "TargetRef" },
/* 0x30 - Reserved */
{ 0x31, "VerDTD" },
{ 0x32, "VerProto" },
{ 0x33, "NumberOfChanges" },
{ 0x34, "MoreData" },
{ 0x00, NULL }
};
static const value_string wbxml_syncmlc11_tags_cp1[] = { /* MetInf 1.1 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Anchor" },
{ 0x06, "EMI" },
{ 0x07, "Format" },
{ 0x08, "FreeID" },
{ 0x09, "FreeMem" },
{ 0x0A, "Last" },
{ 0x0B, "Mark" },
{ 0x0C, "MaxMsgSize" },
{ 0x0D, "Mem" },
{ 0x0E, "MetInf" },
{ 0x0F, "Next" },
{ 0x10, "NextNonce" },
{ 0x11, "SharedMem" },
{ 0x12, "Size" },
{ 0x13, "Type" },
{ 0x14, "Version" },
{ 0x15, "MaxObjSize" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_syncmlc11_tags[] = {
{ 0, wbxml_syncmlc11_tags_cp0 }, /* -//SYNCML//DTD SyncML 1.1//EN */
{ 1, wbxml_syncmlc11_tags_cp1 }, /* -//SYNCML//DTD MetInf 1.1//EN */
{ 0, NULL }
};
static const wbxml_decoding decode_syncmlc_11 = {
"SyncML Representation Protocol 1.1",
"SyncML 1.1",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_syncmlc11_tags,
NULL,
NULL
};
/* SyncML 1.2
*
* SyncML Representation Protocol
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_syncmlc12_tags_cp0[] = { /* SyncML 1.2 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Add" },
{ 0x06, "Alert" },
{ 0x07, "Archive" },
{ 0x08, "Atomic" },
{ 0x09, "Chal" },
{ 0x0a, "Cmd" },
{ 0x0b, "CmdID" },
{ 0x0c, "CmdRef" },
{ 0x0d, "Copy" },
{ 0x0e, "Cred" },
{ 0x0f, "Data" },
{ 0x10, "Delete" },
{ 0x11, "Exec" },
{ 0x12, "Final" },
{ 0x13, "Get" },
{ 0x14, "Item" },
{ 0x15, "Lang" },
{ 0x16, "LocName" },
{ 0x17, "LocURI" },
{ 0x18, "Map" },
{ 0x19, "MapItem" },
{ 0x1a, "Meta" },
{ 0x1b, "MsgID" },
{ 0x1c, "MsgRef" },
{ 0x1d, "NoResp" },
{ 0x1e, "NoResults" },
{ 0x1f, "Put" },
{ 0x20, "Replace" },
{ 0x21, "RespURI" },
{ 0x22, "Results" },
{ 0x23, "Search" },
{ 0x24, "Sequence" },
{ 0x25, "SessionID" },
{ 0x26, "SftDel" },
{ 0x27, "Source" },
{ 0x28, "SourceRef" },
{ 0x29, "Status" },
{ 0x2a, "Sync" },
{ 0x2b, "SyncBody" },
{ 0x2c, "SyncHdr" },
{ 0x2d, "SyncML" },
{ 0x2e, "Target" },
{ 0x2f, "TargetRef" },
/* 0x30 - Reserved */
{ 0x31, "VerDTD" },
{ 0x32, "VerProto" },
{ 0x33, "NumberOfChanges" },
{ 0x34, "MoreData" },
{ 0x35, "Field" },
{ 0x36, "Filter" },
{ 0x37, "Record" },
{ 0x38, "FilterType" },
{ 0x39, "SourceParent" },
{ 0x3a, "TargetParent" },
{ 0x3b, "Move" },
{ 0x3c, "Correlator" },
{ 0x00, NULL }
};
static const value_string wbxml_syncmlc12_tags_cp1[] = { /* MetInf 1.2 */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Anchor" },
{ 0x06, "EMI" },
{ 0x07, "Format" },
{ 0x08, "FreeID" },
{ 0x09, "FreeMem" },
{ 0x0A, "Last" },
{ 0x0B, "Mark" },
{ 0x0C, "MaxMsgSize" },
{ 0x0D, "Mem" },
{ 0x0E, "MetInf" },
{ 0x0F, "Next" },
{ 0x10, "NextNonce" },
{ 0x11, "SharedMem" },
{ 0x12, "Size" },
{ 0x13, "Type" },
{ 0x14, "Version" },
{ 0x15, "MaxObjSize" },
{ 0x16, "FieldLevel" },
{ 0x17, "FP" }, /* Extensions on certain devices */
{ 0x18, "ID" }, /* Extensions on certain devices */
{ 0x19, "IDContainer" }, /* Extensions on certain devices */
{ 0x1a, "IDPair" }, /* Extensions on certain devices */
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_syncmlc12_tags[] = {
{ 0, wbxml_syncmlc12_tags_cp0 }, /* -//SYNCML//DTD SyncML 1.2//EN */
{ 1, wbxml_syncmlc12_tags_cp1 }, /* -//SYNCML//DTD MetInf 1.2//EN */
/* Note: 02 reserved for DM use */
{ 0, NULL }
};
static const wbxml_decoding decode_syncmlc_12 = {
"SyncML Representation Protocol 1.2",
"SyncML 1.2",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_syncmlc12_tags,
NULL,
NULL
};
/* Microsoft ActiveSync 1.0 (Actual Version Unknown - either 1.0 or 2.0, taken from [MS-ASWBXML].pdf)
*
* ActiveSync Representation Protocol
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_mssyncc10_tags_cp0[] = { /* ActiveSync 'AirSync:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Sync" },
{ 0x06, "Responses" },
{ 0x07, "Add" },
{ 0x08, "Change" },
{ 0x09, "Delete" },
{ 0x0A, "Fetch" },
{ 0x0B, "SyncKey" },
{ 0x0C, "ClientId" },
{ 0x0D, "ServerId" },
{ 0x0E, "Status" },
{ 0x0F, "Collection" },
{ 0x10, "Class" },
{ 0x12, "CollectionId" },
{ 0x13, "GetChanges" },
{ 0x14, "MoreAvailable" },
{ 0x15, "WindowSize" },
{ 0x16, "Commands" },
{ 0x17, "Options" },
{ 0x18, "FilterType" },
{ 0x1B, "Conflict" },
{ 0x1C, "Collections" },
{ 0x1D, "ApplicationData" },
{ 0x1E, "DeletesAsMoves" },
{ 0x20, "Supported" },
{ 0x21, "SoftDelete" },
{ 0x22, "MIMESupport" },
{ 0x23, "MIMETruncation" },
{ 0x24, "Wait" },
{ 0x25, "Limit" },
{ 0x26, "Partial" },
{ 0x27, "ConversationMode" },
{ 0x28, "MaxItems" },
{ 0x29, "HeartbeatInterval" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp1[] = { /* ActiveSync 'Contacts:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Anniversary" },
{ 0x06, "AssistantName" },
{ 0x07, "AssistantTelephoneNumber" },
{ 0x08, "Birthday" },
{ 0x0C, "Business2PhoneNumber" },
{ 0x0D, "BusinessCity" },
{ 0x0E, "BusinessCountry" },
{ 0x0F, "BusinessPostalCode" },
{ 0x10, "BusinessState" },
{ 0x11, "BusinessStreet" },
{ 0x12, "BusinessFaxNumber" },
{ 0x13, "BusinessPhoneNumber" },
{ 0x14, "CarPhoneNumber" },
{ 0x15, "Categories" },
{ 0x16, "Category" },
{ 0x17, "Children" },
{ 0x18, "Child" },
{ 0x19, "CompanyName" },
{ 0x1A, "Department" },
{ 0x1B, "Email1Address" },
{ 0x1C, "Email2Address" },
{ 0x1D, "Email3Address" },
{ 0x1E, "FileAs" },
{ 0x1F, "FirstName" },
{ 0x20, "Home2PhoneNumber" },
{ 0x21, "HomeCity" },
{ 0x22, "HomeCountry" },
{ 0x23, "HomePostalCode" },
{ 0x24, "HomeState" },
{ 0x25, "HomeStreet" },
{ 0x26, "HomeFaxNumber" },
{ 0x27, "HomePhoneNumber" },
{ 0x29, "JobTitle" },
{ 0x2A, "MiddleName" },
{ 0x2B, "MobilePhoneNumber" },
{ 0x2C, "OfficeLocation" },
{ 0x2D, "OtherCity" },
{ 0x2E, "OtherCountry" },
{ 0x2F, "OtherPostalCode" },
{ 0x30, "OtherState" },
{ 0x31, "OtherStreet" },
{ 0x32, "PagerNumber" },
{ 0x33, "RadioPhoneNumber" },
{ 0x34, "Spouse" },
{ 0x35, "Suffix" },
{ 0x36, "Title" },
{ 0x37, "Webpage" },
{ 0x38, "YomiCompanyName" },
{ 0x39, "YomiFirstName" },
{ 0x3A, "YomiLastName" },
{ 0x3C, "Picture" },
{ 0x3D, "Alias" },
{ 0x3E, "WeightedRank" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp2[] = { /* ActiveSync 'Email:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x0F, "DateReceived" },
{ 0x11, "DisplayTo" },
{ 0x12, "Importance" },
{ 0x13, "MessageClass" },
{ 0x14, "Subject" },
{ 0x15, "Read" },
{ 0x16, "To" },
{ 0x17, "CC" },
{ 0x18, "From" },
{ 0x19, "ReplyTo" },
{ 0x1A, "AllDayEvent" },
{ 0x1B, "Categories" },
{ 0x1C, "Category" },
{ 0x1D, "DTStamp" },
{ 0x1E, "EndTime" },
{ 0x1F, "InstanceType" },
{ 0x20, "BusyStatus" },
{ 0x21, "Location" },
{ 0x22, "MeetingRequest" },
{ 0x23, "Organizer" },
{ 0x24, "RecurrenceId" },
{ 0x25, "Reminder" },
{ 0x26, "ResponseRequest" },
{ 0x27, "Recurrences" },
{ 0x28, "Recurrence" },
{ 0x29, "Recurrence_Type" },
{ 0x2A, "Recurrence_Until" },
{ 0x2B, "Recurrence_Occurrences" },
{ 0x2C, "Recurrence_Interval" },
{ 0x2D, "Recurrence_DayOfWeek" },
{ 0x2E, "Recurrence_DayOfMonth" },
{ 0x2F, "Recurrence_WeekOfMonth" },
{ 0x30, "Recurrence_MonthOfYear" },
{ 0x31, "StartTime" },
{ 0x32, "Sensitivity" },
{ 0x33, "TimeZone" },
{ 0x34, "GlobalObjId" },
{ 0x35, "ThreadTopic" },
{ 0x39, "InternetCPID" },
{ 0x3A, "Flag" },
{ 0x3B, "FlagStatus" },
{ 0x3C, "ContentClass" },
{ 0x3D, "FlagType" },
{ 0x3E, "CompleteTime" },
{ 0x3F, "DisallowNewTimeProposal" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp4[] = { /* ActiveSync 'Calendar:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "TimeZone" },
{ 0x06, "AllDAyEvent" },
{ 0x07, "Attendees" },
{ 0x08, "Attendee" },
{ 0x09, "Attendee_Email" },
{ 0x0A, "Attendee_Name" },
{ 0x0D, "BusyStatus" },
{ 0x0E, "Categories" },
{ 0x0F, "Category" },
{ 0x11, "DTStamp" },
{ 0x12, "EndTime" },
{ 0x13, "Exception" },
{ 0x14, "Exceptions" },
{ 0x15, "Exception_Deleted" },
{ 0x16, "Exception_StartTime" },
{ 0x17, "Location" },
{ 0x18, "MeetingStatus" },
{ 0x19, "Organizer_Email" },
{ 0x1A, "Organizer_Name" },
{ 0x1B, "Recurrence" },
{ 0x1C, "Recurrence_Type" },
{ 0x1D, "Recurrence_Until" },
{ 0x1E, "Recurrence_Occurrences" },
{ 0x1F, "Recurrence_Interval" },
{ 0x20, "Recurrence_DayOfWeek" },
{ 0x21, "Recurrence_DayOfMonth" },
{ 0x22, "Recurrence_WeekOfMonth" },
{ 0x23, "Recurrence_MonthOfYear" },
{ 0x24, "Reminder" },
{ 0x25, "Sensitivity" },
{ 0x26, "Subject" },
{ 0x27, "StartTime" },
{ 0x28, "UID" },
{ 0x29, "Attendee_Status" },
{ 0x2A, "Attendee_Type" },
{ 0x33, "DisallowNewTimeProposal" },
{ 0x34, "ResponseRequested" },
{ 0x35, "AppointmentReplyTime" },
{ 0x36, "ResponseType" },
{ 0x37, "CalendarType" },
{ 0x38, "IsLeapMonth" },
{ 0x39, "FirstDayOfWeek" },
{ 0x3A, "OnlineMeetingConfLink" },
{ 0x3B, "OnlineMeetingExternalLink" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp5[] = { /* ActiveSync 'Move:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "MoveItems" },
{ 0x06, "Move" },
{ 0x07, "SrcMsgId" },
{ 0x08, "SrcFldId" },
{ 0x09, "DstFldId" },
{ 0x0A, "Response" },
{ 0x0B, "Status" },
{ 0x0C, "DstMsgId" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp6[] = { /* ActiveSync 'GetItemEstimate:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "GetItemEstimate" },
{ 0x06, "Version" },
{ 0x07, "Collections" },
{ 0x08, "Collection" },
{ 0x09, "Class" },
{ 0x0A, "CollectionId" },
{ 0x0B, "DateTime" },
{ 0x0C, "Estimate" },
{ 0x0D, "Response" },
{ 0x0E, "Status" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp7[] = { /* ActiveSync 'FolderHierarchy:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x07, "DisplayName" },
{ 0x08, "ServerId" },
{ 0x09, "ParentId" },
{ 0x0A, "Type" },
{ 0x0C, "Status" },
{ 0x0E, "Changes" },
{ 0x0F, "Add" },
{ 0x10, "Delete" },
{ 0x11, "Update" },
{ 0x12, "SyncKey" },
{ 0x13, "FolderCreate" },
{ 0x14, "FolderDelete" },
{ 0x15, "FolderUpdate" },
{ 0x16, "FolderSync" },
{ 0x17, "Count" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp8[] = { /* ActiveSync 'MeetingResponse:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CalendarId" },
{ 0x06, "CollectionId" },
{ 0x07, "MeetingResponse" },
{ 0x08, "RequestId" },
{ 0x09, "Request" },
{ 0x0A, "Result" },
{ 0x0B, "Status" },
{ 0x0C, "UserResponse" },
{ 0x0E, "InstanceId" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp9[] = { /* ActiveSync 'Tasks:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x08, "Categories" },
{ 0x09, "Category" },
{ 0x0A, "Complete" },
{ 0x0B, "DateCompleted" },
{ 0x0D, "DueDate" },
{ 0x0E, "Importance" },
{ 0x0F, "Recurrence" },
{ 0x10, "Recurrence_Type" },
{ 0x11, "Recurrence_Start" },
{ 0x12, "Recurrence_Until" },
{ 0x13, "Recurrence_Occurrences" },
{ 0x14, "Recurrence_Interval" },
{ 0x15, "Recurrence_DayOfMonth" },
{ 0x16, "Recurrence_DayOfWeek" },
{ 0x17, "Recurrence_WeekOfMonth" },
{ 0x18, "Recurrence_MonthOfYear" },
{ 0x19, "Recurrence_Regenerate" },
{ 0x1A, "Recurrence_DeadOccur" },
{ 0x1B, "ReminderSet" },
{ 0x1C, "ReminderTime" },
{ 0x1D, "Sensitivity" },
{ 0x1E, "StartDate" },
{ 0x1F, "UTCStartDate" },
{ 0x20, "Subject" },
{ 0x22, "OrdinalDate" },
{ 0x23, "SubOrdinalDate" },
{ 0x24, "CalendarType" },
{ 0x25, "IsLeapMonth" },
{ 0x26, "FirstDayOfWeek" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp10[] = { /* ActiveSync 'ResolveRecipients:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ResolveRecipients" },
{ 0x06, "Response" },
{ 0x07, "Status" },
{ 0x08, "Type" },
{ 0x09, "Recipient" },
{ 0x0A, "DisplayName" },
{ 0x0B, "EmailAddress" },
{ 0x0C, "Certificates" },
{ 0x0D, "Certificate" },
{ 0x0E, "MiniCertificate" },
{ 0x0F, "Options" },
{ 0x10, "To" },
{ 0x11, "CertificateRetrieval" },
{ 0x12, "RecipientCount" },
{ 0x13, "MaxCertificates" },
{ 0x14, "MaxAmbiguousRecipients" },
{ 0x15, "CertificateCount" },
{ 0x16, "Availability" },
{ 0x17, "StartTime" },
{ 0x18, "EndTime" },
{ 0x19, "MergedFreeBusy" },
{ 0x1A, "Picture" },
{ 0x1B, "MaxSize" },
{ 0x1C, "Data" },
{ 0x1D, "MaxPictures" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp11[] = { /* ActiveSync 'ValidateCert:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ValidateCert" },
{ 0x06, "Certificates" },
{ 0x07, "Certificate" },
{ 0x08, "CertificateChain" },
{ 0x09, "CheckCRL" },
{ 0x0A, "Status" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp12[] = { /* ActiveSync 'Contacts2:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CustomerId" },
{ 0x06, "GovernmentId" },
{ 0x07, "IMAddress" },
{ 0x08, "IMAddress2" },
{ 0x09, "IMAddress3" },
{ 0x0A, "ManagerName" },
{ 0x0B, "CompanyMainPhone" },
{ 0x0C, "AccountName" },
{ 0x0D, "NickName" },
{ 0x0E, "MMS" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp13[] = { /* ActiveSync 'Ping:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Ping" },
{ 0x06, "AutdState" },
{ 0x07, "Status" },
{ 0x08, "HeartbeatInterval" },
{ 0x09, "Folders" },
{ 0x0A, "Folder" },
{ 0x0B, "Id" },
{ 0x0C, "Class" },
{ 0x0D, "MaxFolders" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp14[] = { /* ActiveSync 'Provision:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Provision" },
{ 0x06, "Policies" },
{ 0x07, "Policy" },
{ 0x08, "PolicyType" },
{ 0x09, "PolicyKey" },
{ 0x0A, "Data" },
{ 0x0B, "Status" },
{ 0x0C, "RemoteWipe" },
{ 0x0D, "EASProvisionDoc" },
{ 0x0E, "DevicePasswordEnabled" },
{ 0x0F, "AlphanumericDevicePasswordRequired" },
{ 0x10, "DeviceEncryptionEnabled" },
{ 0x11, "PasswordRecoveryEnabled" },
{ 0x13, "AttachmentsEnabled" },
{ 0x14, "MinDevicePasswordLength" },
{ 0x15, "MaxInactivityTimeDeviceLock" },
{ 0x16, "MaxDevicePasswordFailedAttempts" },
{ 0x17, "MaxAttachmentSize" },
{ 0x18, "AllowSimpleDevicePassword" },
{ 0x19, "DevicePasswordExpiration" },
{ 0x1A, "DevicePasswordHistory" },
{ 0x1B, "AllowStorageCard" },
{ 0x1C, "AllowCamera" },
{ 0x1D, "RequireDeviceEncryption" },
{ 0x1E, "AllowUnsignedApplications" },
{ 0x1F, "AllowUnsignedInstallationPackages" },
{ 0x20, "MinDevicePasswordComplexCharacters" },
{ 0x21, "AllowWiFi" },
{ 0x22, "AllowTextMessaging" },
{ 0x23, "AllowPOPIMAPEmail" },
{ 0x24, "AllowBluetooth" },
{ 0x25, "AllowIrDA" },
{ 0x26, "RequireManualSyncWhenRoaming" },
{ 0x27, "AllowDesktopSync" },
{ 0x28, "MaxCalendarAgeFilter" },
{ 0x29, "AllowHTMLEmail" },
{ 0x2A, "MaxEmailAgeFilter" },
{ 0x2B, "MaxEmailBodyTruncationSize" },
{ 0x2C, "MaxEmailHTMLBodyTruncationSize" },
{ 0x2D, "RequireSignedSMIMEMessages" },
{ 0x2E, "RequireEncryptedSMIMEMessages" },
{ 0x2F, "RequireSignedSMIMEAlgorithm" },
{ 0x30, "RequireEncryptionSMIMEAlgorithm" },
{ 0x31, "AllowSMIMEEncryptionAlgorithmNegotiation" },
{ 0x32, "AllowSMIMESoftCerts" },
{ 0x33, "AllowBrowser" },
{ 0x34, "AllowConsumerEmail" },
{ 0x35, "AllowRemoteDesktop" },
{ 0x36, "AllowInternetSharing" },
{ 0x37, "UnapprovedInROMApplicationList" },
{ 0x38, "ApplicationName" },
{ 0x39, "ApprovedApplicationList" },
{ 0x3A, "Hash" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp15[] = { /* ActiveSync 'Search:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Search" },
{ 0x07, "Store" },
{ 0x08, "Name" },
{ 0x09, "Query" },
{ 0x0A, "Options" },
{ 0x0B, "Range" },
{ 0x0C, "Status" },
{ 0x0D, "Response" },
{ 0x0E, "Result" },
{ 0x0F, "Properties" },
{ 0x10, "Total" },
{ 0x11, "EqualTo" },
{ 0x12, "Value" },
{ 0x13, "And" },
{ 0x14, "Or" },
{ 0x15, "FreeText" },
{ 0x17, "DeepTraversal" },
{ 0x18, "LongId" },
{ 0x19, "RebuildResults" },
{ 0x1A, "LessThan" },
{ 0x1B, "GreaterThan" },
{ 0x1E, "UserName" },
{ 0x1F, "Password" },
{ 0x20, "ConversationId" },
{ 0x21, "Picture" },
{ 0x22, "MaxSize" },
{ 0x23, "MaxPictures" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp16[] = { /* ActiveSync 'Gal:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "DisplayName" },
{ 0x06, "Phone" },
{ 0x07, "Office" },
{ 0x08, "Title" },
{ 0x09, "Company" },
{ 0x0A, "Alias" },
{ 0x0B, "FirstName" },
{ 0x0C, "LastName" },
{ 0x0D, "HomePhone" },
{ 0x0E, "MobilePhone" },
{ 0x0F, "EmailAddress" },
{ 0x10, "Picture" },
{ 0x11, "Status" },
{ 0x12, "Data" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp17[] = { /* ActiveSync 'AirSyncBase:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "BodyPreference" },
{ 0x06, "Type" },
{ 0x07, "TruncationSize" },
{ 0x08, "AllOrNone" },
{ 0x0A, "Body" },
{ 0x0B, "Data" },
{ 0x0C, "EstimatedDataSize" },
{ 0x0D, "Truncated" },
{ 0x0E, "Attachments" },
{ 0x0F, "Attachment" },
{ 0x10, "DisplayName" },
{ 0x11, "FileReference" },
{ 0x12, "Method" },
{ 0x13, "ContentId" },
{ 0x14, "ContentLocation" },
{ 0x15, "IsInline" },
{ 0x16, "NativeBodyType" },
{ 0x17, "ContentType" },
{ 0x18, "Preview" },
{ 0x19, "BodyPartReference" },
{ 0x1A, "BodyPart" },
{ 0x1B, "Status" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp18[] = { /* ActiveSync 'Settings:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Settings" },
{ 0x06, "Status" },
{ 0x07, "Get" },
{ 0x08, "Set" },
{ 0x09, "Oof" },
{ 0x0A, "OofState" },
{ 0x0B, "StartTime" },
{ 0x0C, "EndTime" },
{ 0x0D, "OofMessage" },
{ 0x0E, "AppliesToInteral" },
{ 0x0F, "AppliesToExternalKnown" },
{ 0x10, "AppliesToExternalUnknown" },
{ 0x11, "Enabled" },
{ 0x12, "ReplyMessage" },
{ 0x13, "BodyType" },
{ 0x14, "DevicePassword" },
{ 0x15, "Password" },
{ 0x16, "DeviceInformation" },
{ 0x17, "Model" },
{ 0x18, "IMEI" },
{ 0x19, "FriendlyName" },
{ 0x1A, "OS" },
{ 0x1B, "OSLanguage" },
{ 0x1C, "PhoneNumber" },
{ 0x1D, "UserInformation" },
{ 0x1E, "EmailAddresses" },
{ 0x1F, "SmtpAddress" },
{ 0x20, "UserAgent" },
{ 0x21, "EnableOutboundSMS" },
{ 0x22, "MobileOperator" },
{ 0x23, "PrimarySmtpAddress" },
{ 0x24, "Accounts" },
{ 0x25, "Account" },
{ 0x26, "AccountId" },
{ 0x27, "AccountName" },
{ 0x28, "UserDisplayName" },
{ 0x29, "SendDisabled" },
{ 0x2B, "RightsManagementInformation" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp19[] = { /* ActiveSync 'DocumentLibrary:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "LinkId" },
{ 0x06, "DisplayName" },
{ 0x07, "IsFolder" },
{ 0x09, "CreationDate" },
{ 0x0A, "LastModifiedDate" },
{ 0x0B, "ContentLength" },
{ 0x0C, "ContentType" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp20[] = { /* ActiveSync 'ItemOperations:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ItemOperations" },
{ 0x06, "Fetch" },
{ 0x07, "Store" },
{ 0x08, "Options" },
{ 0x09, "Range" },
{ 0x0A, "Total" },
{ 0x0B, "Properties" },
{ 0x0C, "Data" },
{ 0x0D, "Status" },
{ 0x0E, "Response" },
{ 0x0F, "Version" },
{ 0x10, "Schema" },
{ 0x11, "Part" },
{ 0x12, "EmptyFolderContents" },
{ 0x13, "DeleteSubFolders" },
{ 0x14, "UserName" },
{ 0x15, "Password" },
{ 0x16, "Move" },
{ 0x17, "DstFldId" },
{ 0x18, "ConversationId" },
{ 0x19, "MoveAlways" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp21[] = { /* ActiveSync 'ComposeMail:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "SendMail" },
{ 0x06, "SmartForward" },
{ 0x07, "SmartReply" },
{ 0x08, "SaveInSentItems" },
{ 0x09, "ReplaceMime" },
{ 0x0B, "Source" },
{ 0x0C, "FolderId" },
{ 0x0D, "ItemId" },
{ 0x0E, "LongId" },
{ 0x0F, "InstanceId" },
{ 0x10, "MIME" },
{ 0x11, "ClientId" },
{ 0x12, "Status" },
{ 0x13, "AccountId" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp22[] = { /* ActiveSync 'Email2:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "UmCallerID" },
{ 0x06, "UmUserNotes" },
{ 0x07, "UmAttDuration" },
{ 0x08, "UmAttOrder" },
{ 0x09, "ConversationId" },
{ 0x0A, "ConversationIndex" },
{ 0x0B, "LastVerbExecuted" },
{ 0x0C, "LastVerbExecutionTime" },
{ 0x0D, "ReceivedAsBcc" },
{ 0x0E, "Sender" },
{ 0x0F, "CalendarType" },
{ 0x10, "IsLeapMonth" },
{ 0x11, "AccountId" },
{ 0x12, "FirstDayOfWeek" },
{ 0x13, "MeetingMessageType" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp23[] = { /* ActiveSync 'Notes:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Subject" },
{ 0x06, "MessageClass" },
{ 0x07, "LastModifiedDate" },
{ 0x08, "Categories" },
{ 0x09, "Category" },
{ 0x00, NULL }
};
static const value_string wbxml_mssyncc10_tags_cp24[] = { /* ActiveSync 'RightsManagement:' Page */
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "RightsManagementSupport" },
{ 0x06, "RightsManagementTemplates" },
{ 0x07, "RightsManagementTemplate" },
{ 0x08, "RightsManagementLicense" },
{ 0x09, "EditAllowed" },
{ 0x0A, "ReplyAllowed" },
{ 0x0B, "ReplyAllAllowed" },
{ 0x0C, "ForwardAllowed" },
{ 0x0D, "ModifyRecipientsAllowed" },
{ 0x0E, "ExtractAllowed" },
{ 0x0F, "PrintAllowed" },
{ 0x10, "ExportAllowed" },
{ 0x11, "ProgrammaticAccessAllowed" },
{ 0x12, "RMOwner" },
{ 0x13, "ContentExpiryDate" },
{ 0x14, "TemplateId" },
{ 0x15, "TemplateName" },
{ 0x16, "TemplateDescription" },
{ 0x17, "ContentOwner" },
{ 0x18, "RemoveRightsManagementDistribution" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_mssyncc10_tags[] = {
{ 0x00, wbxml_mssyncc10_tags_cp0 }, /* AirSync: */
{ 0x01, wbxml_mssyncc10_tags_cp1 }, /* Contacts: */
{ 0x02, wbxml_mssyncc10_tags_cp2 }, /* Email: */
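/* 0x03 is not aggregated: ActiveSync code page 3 (believed to be the
* retired "AirNotify:" page) defines no tokens in current protocol
* versions, so there is no table for it. */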
{ 0x04, wbxml_mssyncc10_tags_cp4 }, /* Calendar: */
{ 0x05, wbxml_mssyncc10_tags_cp5 }, /* Move: */
{ 0x06, wbxml_mssyncc10_tags_cp6 }, /* GetItemEstimate: */
{ 0x07, wbxml_mssyncc10_tags_cp7 }, /* FolderHierarchy: */
{ 0x08, wbxml_mssyncc10_tags_cp8 }, /* MeetingResponse: */
{ 0x09, wbxml_mssyncc10_tags_cp9 }, /* Tasks: */
{ 0x0A, wbxml_mssyncc10_tags_cp10 }, /* ResolveRecipients: */
{ 0x0B, wbxml_mssyncc10_tags_cp11 }, /* ValidateCert: */
{ 0x0C, wbxml_mssyncc10_tags_cp12 }, /* Contacts2: */
{ 0x0D, wbxml_mssyncc10_tags_cp13 }, /* Ping: */
{ 0x0E, wbxml_mssyncc10_tags_cp14 }, /* Provision: */
{ 0x0F, wbxml_mssyncc10_tags_cp15 }, /* Search: */
{ 0x10, wbxml_mssyncc10_tags_cp16 }, /* Gal: */
{ 0x11, wbxml_mssyncc10_tags_cp17 }, /* AirSyncBase: */
{ 0x12, wbxml_mssyncc10_tags_cp18 }, /* Settings: */
{ 0x13, wbxml_mssyncc10_tags_cp19 }, /* DocumentLibrary: */
{ 0x14, wbxml_mssyncc10_tags_cp20 }, /* ItemOperations: */
{ 0x15, wbxml_mssyncc10_tags_cp21 }, /* ComposeMail: */
{ 0x16, wbxml_mssyncc10_tags_cp22 }, /* Email2: */
{ 0x17, wbxml_mssyncc10_tags_cp23 }, /* Notes: */
{ 0x18, wbxml_mssyncc10_tags_cp24 }, /* RightsManagement: */
{ 0x00, NULL }
};
static const wbxml_decoding decode_mssync_10 = {
"Microsoft ActiveSync", /* name */
"ActiveSync", /* abbrev */
{ NULL, NULL, NULL }, /* ext_t[] extension token handlers (none) */
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL, /* global token tables */
wbxml_mssyncc10_tags, /* tag token tables */
NULL, /* attrStart token tables */
NULL /* attrValue token tables */
};
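/*
* Worked example (illustrative, not taken from a capture): a WBXML tag
* byte is interpreted per the WBXML spec as bits 0-5 = token, bit 6
* (0x40) = element has content, bit 7 (0x80) = element has attributes.
* So after a SWITCH_PAGE to code page 0x0E (Provision:), the byte
* 0x45 (0x40 | 0x05) decodes as a <Provision> element with content,
* and 0x4B (0x40 | 0x0B) as a <Status> element with content.
*/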
/* CHANNEL 1.0
*
* WTA Channel
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_channelc10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "channel" },
{ 0x06, "title" },
{ 0x07, "abstract" },
{ 0x08, "resource" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_channelc10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "maxspace=" },
{ 0x06, "base=" },
{ 0x07, "href=" },
{ 0x08, "href='http://'" },
{ 0x09, "href='https://'" },
{ 0x0A, "lastmod=" },
{ 0x0B, "etag=" },
{ 0x0C, "md5=" },
{ 0x0D, "success=" },
{ 0x0E, "success='http://'" },
{ 0x0F, "success='https://'" },
{ 0x10, "failure=" },
{ 0x11, "failure='http://'" },
{ 0x12, "failure='https://'" },
{ 0x13, "EventId=" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_channelc10_tags[] = {
{ 0, wbxml_channelc10_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_channelc10_attrStart[] = {
{ 0, wbxml_channelc10_attrStart_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_channelc_10 = {
"Wireless Telephony Application (WTA) Channel 1.0",
"CHANNEL 1.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_channelc10_tags,
wbxml_channelc10_attrStart,
NULL
};
/* application/x-wap-prov.browser-settings
* application/x-wap-prov.browser-bookmarks
*
* Nokia OTA Provisioning document format
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
static const value_string wbxml_nokiaprovc70_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CHARACTERISTIC-LIST" },
{ 0x06, "CHARACTERISTIC" },
{ 0x07, "PARM" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
static const value_string wbxml_nokiaprovc70_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x06, "TYPE='ADDRESS'" },
{ 0x07, "TYPE='URL'" },
{ 0x08, "TYPE='NAME'" },
{ 0x10, "NAME=" },
{ 0x11, "VALUE=" },
{ 0x12, "NAME='BEARER'" },
{ 0x13, "NAME='PROXY'" },
{ 0x14, "NAME='PORT'" },
{ 0x15, "NAME='NAME'" },
{ 0x16, "NAME='PROXY_TYPE'" },
{ 0x17, "NAME='URL'" },
{ 0x18, "NAME='PROXY_AUTHNAME'" },
{ 0x19, "NAME='PROXY_AUTHSECRET'" },
{ 0x1A, "NAME='SMS_SMSC_ADDRESS'" },
{ 0x1B, "NAME='USSD_SERVICE_CODE'" },
{ 0x1C, "NAME='GPRS_ACCESSPOINTNAME'" },
{ 0x1D, "NAME='PPP_LOGINTYPE'" },
{ 0x1E, "NAME='PROXY_LOGINTYPE'" },
{ 0x21, "NAME='CSD_DIALSTRING'" },
{ 0x22, "NAME='PPP_AUTHTYPE'" },
{ 0x23, "NAME='PPP_AUTHNAME'" },
{ 0x24, "NAME='PPP_AUTHSECRET'" },
{ 0x28, "NAME='CSD_CALLTYPE'" },
{ 0x29, "NAME='CSD_CALLSPEED'" },
{ 0x45, "VALUE='GSM/CSD'" },
{ 0x46, "VALUE='GSM/SMS'" },
{ 0x47, "VALUE='GSM/USSD'" },
{ 0x48, "VALUE='IS-136/CSD'" },
{ 0x49, "VALUE='GPRS'" },
{ 0x60, "VALUE='9200'" },
{ 0x61, "VALUE='9201'" },
{ 0x62, "VALUE='9202'" },
{ 0x63, "VALUE='9203'" },
{ 0x64, "VALUE='AUTOMATIC'" },
{ 0x65, "VALUE='MANUAL'" },
{ 0x6A, "VALUE='AUTO'" },
{ 0x6B, "VALUE='9600'" },
{ 0x6C, "VALUE='14400'" },
{ 0x6D, "VALUE='19200'" },
{ 0x6E, "VALUE='28800'" },
{ 0x6F, "VALUE='38400'" },
{ 0x70, "VALUE='PAP'" },
{ 0x71, "VALUE='CHAP'" },
{ 0x72, "VALUE='ANALOGUE'" },
{ 0x73, "VALUE='ISDN'" },
{ 0x74, "VALUE='43200'" },
{ 0x75, "VALUE='57600'" },
{ 0x76, "VALUE='MSISDN_NO'" },
{ 0x77, "VALUE='IPV4'" },
{ 0x78, "VALUE='MS_CHAP'" },
{ 0x7C, "TYPE='MMSURL'" },
{ 0x7D, "TYPE='ID'" },
{ 0x7E, "NAME='ISP_NAME'" },
{ 0x7F, "TYPE='BOOKMARK'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/***** Token code page aggregation *****/
static const value_valuestring wbxml_nokiaprovc70_tags[] = {
{ 0, wbxml_nokiaprovc70_tags_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_nokiaprovc70_attrStart[] = {
{ 0, wbxml_nokiaprovc70_attrStart_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_nokiaprovc_70 = {
"Nokia Client Provisioning 7.0",
"Nokia Client Provisioning 7.0",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_nokiaprovc70_tags,
wbxml_nokiaprovc70_attrStart,
NULL
};
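/*
* Worked example (illustrative): with the tables above, the empty
* element <PARM NAME="PORT" VALUE="9201"/> can be encoded as the bytes
* 0x87 0x14 0x61 0x01 -- tag 0x07 (PARM) with the attribute bit 0x80
* set, attribute start 0x14 (NAME='PORT'), attribute start 0x61
* (VALUE='9201'), and END (0x01) closing the attribute list.
*/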
/* UAProf [WAP-248]
*
* User-Agent Profile (used in profile-diff WSP header)
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
/* CodePage 0 RDF */
static const value_string wbxml_uaprof_tags_cp0[] = {
{0x05, "rdf:RDF"},
{0x06, "rdf:Description"},
{0x07, "rdf:Alt"},
{0x08, "rdf:Bag"},
{0x09, "rdf:Seq"},
{0x0A, "rdf:li"},
{0x0B, "rdf:type"},
{0x0C, "rdf:value"},
{0x0D, "rdf:subject"},
{0x0E, "rdf:predicate"},
{0x0F, "rdf:object"},
{ 0x00, NULL }
};
/* CodePage 1 Core Vocabulary */
static const value_string wbxml_uaprof_tags_cp1[] = {
{0x06, "rdf:Description"},
{0x07, "rdf:Alt"},
{0x08, "rdf:Bag"},
{0x09, "rdf:Seq"},
{0x0A, "rdf:li"},
{0x0B, "rdf:type"},
{0x0C, "prf:component"},
{0x0D, "prf:defaults"},
{0x0E, "prf:BitsPerPixel"},
{0x0F, "prf:ColorCapable"},
{0x10, "prf:CPU"},
{0x11, "prf:ImageCapable"},
{0x12, "prf:InputCharSet"},
{0x13, "prf:Keyboard"},
{0x15, "prf:Model"},
{0x16, "prf:OutputCharSet"},
{0x17, "prf:PointingResolution"},
{0x18, "prf:ScreenSize"},
{0x19, "prf:ScreenSizeChar"},
{0x1A, "prf:NumberOfSoftKeys"},
{0x1B, "prf:SoundOutputCapable"},
{0x1C, "prf:TextInputCapable"},
{0x1D, "prf:Vendor"},
{0x1E, "prf:VoiceInputCapable"},
{0x1F, "prf:AcceptDownloadableSoftware"},
{0x20, "prf:AudioInputEncoder"},
{0x21, "prf:DownloadableSoftwareSupport"},
{0x22, "prf:JavaEnabled"},
{0x23, "prf:JVMVersion"},
{0x24, "prf:MexeClassmark"},
{0x25, "prf:MexeSpec"},
{0x26, "prf:OSName"},
{0x27, "prf:OSVendor"},
{0x28, "prf:OSVersion"},
{0x29, "prf:RecipientAppAgent"},
{0x2A, "prf:SoftwareNumber"},
{0x2B, "prf:VideoInputEncoder"},
{0x2C, "prf:CurrentBearerService"},
{0x2D, "prf:SecuritySupport"},
{0x2E, "prf:SupportedBearers"},
{0x2F, "prf:WapDeviceClass"},
{0x30, "prf:WapPushMsgPriority"}, /* Deprecated */
{0x31, "prf:WapPushMsgSize"}, /* Deprecated */
{0x32, "prf:WapVersion"},
{0x33, "prf:WmlDeckSize"},
{0x34, "prf:WmlScriptLibraries"},
{0x35, "prf:WmlScriptVersion"},
{0x36, "prf:WmlVersion"},
{0x37, "prf:WtaiLibraries"},
{0x38, "prf:WtaVersion"},
{0x39, "prf:PixelAspectRatio"},
{0x3A, "prf:StandardFontProportional"},
{0x3B, "prf:WapSupportedApplications"}, /* Deprecated */
{0x3C, "prf:BluetoothProfile"},
{0x3D, "prf:MexeClassmarks"},
{0x3E, "prf:MexeSecureDomains"},
{ 0x00, NULL }
};
/* CodePage 4 Core Vocabulary (continued) */
static const value_string wbxml_uaprof_tags_cp4[] = {
{0x10, "prf:SupportedBluetoothVersion"},
{0x11, "prf:SupportedPictogramSet"},
{0x12, "prf:CcppAccept"},
{0x13, "prf:CcppAccept-Charset"},
{0x14, "prf:CcppAccept-Encoding"},
{0x15, "prf:CcppAccept-Language"},
{ 0x00, NULL }
};
/* CodePage 2 BrowserUA */
static const value_string wbxml_uaprof_tags_cp2[] = {
{0x05, "rdf:Description"},
{0x06, "rdf:Alt"},
{0x07, "rdf:Bag"},
{0x08, "rdf:Seq"},
{0x09, "rdf:li"},
{0x0A, "rdf:type"},
{0x0B, "prf:component"},
{0x0C, "prf:defaults"},
{0x0D, "prf:BrowserName"},
{0x0E, "prf:BrowserVersion"},
{0x0F, "prf:CcppAccept"}, /* Deprecated */
{0x10, "prf:CcppAccept-Charset"}, /* Deprecated */
{0x11, "prf:CcppAccept-Encoding"}, /* Deprecated */
{0x12, "prf:CcppAccept-Language"}, /* Deprecated */
{0x13, "prf:DownloadableBrowserApps"},
{0x14, "prf:FramesCapable"},
{0x15, "prf:HtmlVersion"},
{0x16, "prf:JavaAppletEnabled"},
{0x17, "prf:JavaScriptEnabled"},
{0x18, "prf:JavaScriptVersion"},
{0x19, "prf:PreferenceForFrames"},
{0x1A, "prf:TablesCapable"},
{0x1B, "Prf:XhtmlVersion"},
{0x1C, "prf:XhtmlModules"},
{ 0x00, NULL }
};
/* CodePage 3 PushCharacteristics */
static const value_string wbxml_uaprof_tags_cp3[] = {
{0x05, "rdf:Description"},
{0x06, "rdf:Alt"},
{0x07, "rdf:Bag"},
{0x08, "rdf:Seq"},
{0x09, "rdf:li"},
{0x0A, "rdf:type"},
{0x0B, "prf:component"},
{0x0C, "prf:defaults"},
{0x0D, "prf:Push-Accept"},
{0x0E, "prf:Push-Accept-Charset"},
{0x0F, "prf:Push-Accept-Encoding"},
{0x10, "prf:Push-Accept-Language"},
{0x11, "prf:Push-Accept-AppID"},
{0x12, "prf:Push-MsgSize"},
{0x13, "prf:Push-MaxPushReq"},
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/* CodePage 0 RDF */
static const value_string wbxml_uaprof_attrStart_cp0[] = {
{0x05, "ID"},
{0x06, "rdf:about"},
{0x07, "rdf:aboutEach"},
{0x08, "rdf:aboutEachPrefix"},
{0x09, "rdf:bagID"},
{0x0A, "rdf:type"},
{0x0B, "rdf:resource"},
{0x0C, "rdf:parseType='Literal'"},
{0x0D, "rdf:parseType='Resource'"},
{0x0E, "xml:lang"},
{0x0F, "xmlns:prf"},
{0x10, "xmlns:rdf"},
{ 0x00, NULL }
};
/* CodePage 1 Core Vocabulary */
static const value_string wbxml_uaprof_attrStart_cp1[] = {
{0x05, "rdf:resource"},
{0x06, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#HardwarePlatform'"},
{0x07, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#SoftwarePlatform'"},
{0x08, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#NetworkCharacteristics'"},
{0x09, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#WapCharacteristics'"},
{0x0A, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#BrowserUA'"},
{0x0B, "rdf:resource='http://www.wapforum.org/profiles/UAPROF/"
"ccppschema-20010430#PushCharacteristics'"},
{0x10, "prf:BitsPerPixel"},
{0x11, "prf:ColorCapable='Yes'"},
{0x12, "prf:ColorCapable='No'"},
{0x13, "prf:CPU"},
{0x14, "prf:ImageCapable='Yes'"},
{0x15, "prf:ImageCapable='No'"},
{0x16, "prf:InputCharSet"},
{0x17, "prf:Keyboard"},
{0x19, "prf:Model"},
{0x1A, "prf:OutputCharSet"},
{0x1B, "prf:PointingResolution"},
{0x1C, "prf:ScreenSize"},
{0x1D, "prf:ScreenSizeChar"},
{0x1E, "prf:NumberOfSoftKeys='Yes'"},
{0x20, "prf:SoundOutputCapable='Yes'"},
{0x21, "prf:SoundOutputCapable='No'"},
{0x22, "prf:TextInputCapable='Yes'"},
{0x23, "prf:TextInputCapable='No'"},
{0x24, "prf:Vendor"},
{0x25, "prf:VoiceInputCapable='Yes'"},
{0x26, "prf:VoiceInputCapable='No'"},
{0x27, "prf:PixelAspectRatio"},
{0x28, "prf:StandardFontProportional='Yes'"},
{0x29, "prf:StandardFontProportional='No'"},
{0x30, "prf:AcceptDownloadableSoftware='Yes'"},
{0x31, "prf:AcceptDownloadableSoftware='No'"},
{0x32, "prf:AudioInputEncoder"},
{0x33, "prf:DownloadableSoftwareSupport"},
{0x35, "prf:JavaEnabled='Yes'"},
{0x36, "prf:JavaEnabled='No'"},
{0x37, "prf:JVMVersion"},
{0x38, "prf:MexeClassmark"},
{0x39, "prf:MexeSpec"},
{0x3A, "prf:OSName"},
{0x3B, "prf:OSVendor"},
{0x3C, "prf:OSVersion"},
{0x3D, "prf:RecipientAppAgent"},
{0x3E, "prf:SoftwareNumber"},
{0x21, "prf:SoundOutputCapable='No'"},
{0x22, "prf:TextInputCapable='Yes'"},
{0x23, "prf:TextInputCapable='No'"},
{0x24, "prf:Vendor"},
{0x25, "prf:VoiceInputCapable='Yes'"},
{0x26, "prf:VoiceInputCapable='No'"},
{0x27, "prf:PixelAspectRatio"},
{0x28, "prf:StandardFontProportional='Yes'"},
{0x29, "prf:StandardFontProportional='No'"},
{0x30, "prf:AcceptDownloadableSoftware='Yes'"},
{0x31, "prf:AcceptDownloadableSoftware='No'"},
{0x32, "prf:AudioInputEncoder"},
{0x33, "prf:DownloadableSoftwareSupport"},
{0x35, "prf:JavaEnabled='Yes'"},
{0x36, "prf:JavaEnabled='No'"},
{0x37, "prf:JVMVersion"},
{0x38, "prf:MexeClassmark"},
{0x39, "prf:MexeSpec"},
{0x3A, "prf:OSName"},
{0x3B, "prf:OSVendor"},
{0x3C, "prf:OSVersion"},
{0x3D, "prf:RecipientAppAgent"},
{0x3E, "prf:SoftwareNumber"},
{0x3F, "prf:VideoInputEncoder"},
{0x50, "prf:CurrentBearerService"},
{0x51, "prf:SecuritySupport"},
{0x52, "prf:SupportedBearers"},
{0x60, "prf:WapDeviceClass"},
{0x61, "prf:WapPushMsgPriority"}, /* Deprecated */
{0x62, "prf:WapPushMsgSize"}, /* Deprecated */
{0x63, "prf:WapVersion"},
{0x64, "prf:WmlDeckSize"},
{0x65, "prf:WmlScriptLibraries"},
{0x66, "prf:WmlScriptVersion"},
{0x67, "prf:WmlVersion"},
{0x68, "prf:WtaiLibraries"},
{0x69, "prf:WtaVersion"},
{0x70, "prf:WapSupportedApplications"}, /* Deprecated */
{0x71, "prf:BluetoothProfile"},
{0x72, "prf:MexeClassmarks"},
{0x73, "prf:MexeSecureDomains='YES'"},
{0x74, "prf:MexeSecureDomains='NO'"},
{0x75, "prf:SupportedBluetoothVersion"},
{0x76, "prf:SupportedPictogramSet"},
{0x77, "prf:CcppAccept"},
{0x78, "prf:CcppAccept-Charset"},
{0x79, "prf:CcppAccept-Encoding"},
{0x7F, "prf:CcppAccept-Language"},
{ 0x00, NULL }
};
/* CodePage 2 BrowserUA */
static const value_string wbxml_uaprof_attrStart_cp2[] = {
{0x05, "prf:CcppAccept"}, /* Deprecated */
{0x06, "prf:CcppAccept-Charset"}, /* Deprecated */
{0x07, "prf:CcppAccept-Encoding"}, /* Deprecated */
{0x08, "prf:CcppAccept-Language"}, /* Deprecated */
{0x09, "prf:DownloadableBrowserApps"},
{0x0A, "prf:FramesCapable='Yes'"},
{0x0B, "prf:FramesCapable='No'"},
{0x0C, "prf:HtmlVersion='3.2'"},
{0x0D, "prf:HtmlVersion='4.0'"},
{0x0E, "prf:JavaAppletEnabled='Yes'"},
{0x0F, "prf:JavaAppletEnabled='No'"},
{0x10, "prf:JavaScriptEnabled='Yes'"},
{0x11, "prf:JavaScriptEnabled='No'"},
{0x12, "prf:JavaScriptVersion"},
{0x13, "prf:PreferenceForFrames='Yes'"},
{0x14, "prf:PreferenceForFrames='No'"},
{0x15, "prf:TablesCapable='Yes'"},
{0x16, "prf:TablesCapable='No'"},
{0x17, "prf:XhtmlVersion"},
{0x18, "prf:XhtmlModules"},
{0x19, "prf:BrowserName"},
{0x1A, "prf:BrowserVersion"},
{ 0x00, NULL }
};
/* CodePage 3 PushCharacteristics */
static const value_string wbxml_uaprof_attrStart_cp3[] = {
{0x05, "prf:Push-Accept"},
{0x06, "prf:Push-Accept-Charset"},
{0x07, "prf:Push-Accept-Encoding"},
{0x08, "prf:Push-Accept-Language"},
{0x09, "prf:Push-Accept-AppID"},
{0x0A, "prf:Push-MsgSize"},
{0x0B, "prf:Push-MaxPushReq"},
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* CodePage 0 RDF */
static const value_string wbxml_uaprof_attrValue_cp0[] = {
{0x85, "rdf:Statement"},
{0x86, "http://"},
{0x87, "http://www."},
{0x88, "https://"},
{0x89, "https://www."},
{0x8A, "www."},
{0x8B, ".com/"},
{0x8C, ".edu/"},
{0x8D, ".net/"},
{0x8E, ".org/"},
{ 0x00, NULL }
};
/* CodePage 1 CoreVocabularyAttrValue */
static const value_string wbxml_uaprof_attrValue_cp1[] = {
{0x85, "No"},
{0x86, "Yes"},
{ 0x00, NULL }
};
/* CodePage 2 BrowserUAAttrValue */
static const value_string wbxml_uaprof_attrValue_cp2[] = {
{0x85, "No"},
{0x86, "Yes"},
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_uaprof_tags[] = {
{ 0, wbxml_uaprof_tags_cp0 },
{ 1, wbxml_uaprof_tags_cp1 },
{ 2, wbxml_uaprof_tags_cp2 },
{ 3, wbxml_uaprof_tags_cp3 },
{ 4, wbxml_uaprof_tags_cp4 },
{ 0, NULL }
};
static const value_valuestring wbxml_uaprof_attrStart[] = {
{ 0, wbxml_uaprof_attrStart_cp0 },
{ 1, wbxml_uaprof_attrStart_cp1 },
{ 2, wbxml_uaprof_attrStart_cp2 },
{ 3, wbxml_uaprof_attrStart_cp3 },
{ 0, NULL }
};
static const value_valuestring wbxml_uaprof_attrValue[] = {
{ 0, wbxml_uaprof_attrValue_cp0 },
{ 1, wbxml_uaprof_attrValue_cp1 },
{ 2, wbxml_uaprof_attrValue_cp2 },
{ 0, NULL }
};
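/*
* In the WBXML attribute code space, tokens below 0x80 are attribute
* starts (optionally embedding a value prefix, as above) and tokens
* 0x80 and up are attribute values appended to the open attribute.
* Illustrative example with these UAProf tables: attribute start 0x06
* (rdf:about) followed by value token 0x87 ("http://www.") and an
* inline string yields rdf:about="http://www.<string>".
*/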
static const wbxml_decoding decode_uaprof_wap_248 = {
"User-Agent Profile (WAP-174, WAP-248)",
"UAProf (WAP-174, WAP-248)",
{ NULL, NULL, NULL },
default_opaque_binary_tag,
default_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_uaprof_tags,
wbxml_uaprof_attrStart,
wbxml_uaprof_attrValue
};
/* WV-CSP 1.0
*
* Wireless Village Client Server Protocol
***************************************/
/***** Global extension tokens *****/
/***** Tag tokens *****/
/* Common code page (0x00) */
static const value_string wbxml_wv_csp_10_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Acceptance" },
{ 0x06, "AddList" },
{ 0x07, "AddNickList" },
{ 0x08, "Attribute" },
{ 0x09, "AttributeList" },
{ 0x0A, "ClientID" },
{ 0x0B, "Code" },
{ 0x0C, "ContactList" },
{ 0x0D, "ContentData" },
{ 0x0E, "ContentEncoding" },
{ 0x0F, "ContentSize" },
{ 0x10, "ContentType" },
{ 0x11, "DateTime" },
{ 0x12, "Description" },
{ 0x13, "DetailedResult" },
{ 0x14, "EntityList" },
{ 0x15, "Group" },
{ 0x16, "GroupID" },
{ 0x17, "GroupList" },
{ 0x18, "InUse" },
{ 0x19, "Logo" },
{ 0x1A, "MessageCount" },
{ 0x1B, "MessageID" },
{ 0x1C, "MessageURI" },
{ 0x1D, "MSISDN" },
{ 0x1E, "Name" },
{ 0x1F, "NickList" },
{ 0x20, "NickName" },
{ 0x21, "Poll" },
{ 0x22, "Presence" },
{ 0x23, "PresenceSubList" },
{ 0x24, "PresenceValue" },
{ 0x25, "Property" },
{ 0x26, "Qualifier" },
{ 0x27, "Recipient" },
{ 0x28, "RemoveList" },
{ 0x29, "RemoveNickList" },
{ 0x2A, "Result" },
{ 0x2B, "ScreenName" },
{ 0x2C, "Sender" },
{ 0x2D, "Session" },
{ 0x2E, "SessionDescriptor" },
{ 0x2F, "SessionID" },
{ 0x30, "SessionType" },
{ 0x31, "Status" },
{ 0x32, "Transaction" },
{ 0x33, "TransactionContent" },
{ 0x34, "TransactionDescriptor" },
{ 0x35, "TransactionID" },
{ 0x36, "TransactionMode" },
{ 0x37, "URL" },
{ 0x38, "URLList" },
{ 0x39, "User" },
{ 0x3A, "UserID" },
{ 0x3B, "UserList" },
{ 0x3C, "Validity" },
{ 0x3D, "Value" },
{ 0x3E, "WV-CSP-Message" },
{ 0x00, NULL }
};
/* Access code page (0x01) */
static const value_string wbxml_wv_csp_10_tags_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AllFunctions" },
{ 0x06, "AllFunctionsRequest" },
{ 0x07, "CancelInvite-Request" },
{ 0x08, "CancelInviteUser-Request" },
{ 0x09, "Capability" },
{ 0x0A, "CapabilityList" },
{ 0x0B, "CapabilityRequest" },
{ 0x0C, "ClientCapability-Request" },
{ 0x0D, "ClientCapability-Response" },
{ 0x0E, "DigestBytes" },
{ 0x0F, "DigestSchema" },
{ 0x10, "Disconnect" },
{ 0x11, "Functions" },
{ 0x12, "GetSPInfo-Request" },
{ 0x13, "GetSPInfo-Response" },
{ 0x14, "InviteID" },
{ 0x15, "InviteNote" },
{ 0x16, "Invite-Request" },
{ 0x17, "Invite-Response" },
{ 0x18, "InviteType" },
{ 0x19, "InviteUser-Request" },
{ 0x1A, "InviteUser-Response" },
{ 0x1B, "KeepAlive-Request" },
{ 0x1C, "KeepAliveTime" },
{ 0x1D, "Login-Request" },
{ 0x1E, "Login-Response" },
{ 0x1F, "Logout-Request" },
{ 0x20, "Nonce" },
{ 0x21, "Password" },
{ 0x22, "Polling-Request" },
{ 0x23, "ResponseNote" },
{ 0x24, "SearchElement" },
{ 0x25, "SearchFindings" },
{ 0x26, "SearchID" },
{ 0x27, "SearchIndex" },
{ 0x28, "SearchLimit" },
{ 0x29, "SearchOnlineStatus" },
{ 0x2A, "SearchPairList" },
{ 0x2B, "Search-Request" },
{ 0x2C, "Search-Response" },
{ 0x2D, "SearchResult" },
{ 0x2E, "Service-Request" },
{ 0x2F, "Service-Response" },
{ 0x30, "SessionCookie" },
{ 0x31, "StopSearch-Request" },
{ 0x32, "TimeToLive" },
{ 0x00, NULL }
};
/* Service code page (0x02) */
static const value_string wbxml_wv_csp_10_tags_cp2[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ADDGM" },
{ 0x06, "AttListFunc" },
{ 0x07, "BLENT" },
{ 0x08, "CAAUT" },
{ 0x09, "CAINV" },
{ 0x0A, "CALI" },
{ 0x0B, "CCLI" },
{ 0x0C, "ContListFunc" },
{ 0x0D, "CREAG" },
{ 0x0E, "DALI" },
{ 0x0F, "DCLI" },
{ 0x10, "DELGR" },
{ 0x11, "FundamentalFeat" },
{ 0x12, "FWMSG" },
{ 0x13, "GALS" },
{ 0x14, "GCLI" },
{ 0x15, "GETGM" },
{ 0x16, "GETGP" },
{ 0x17, "GETLM" },
{ 0x18, "GETM" },
{ 0x19, "GETPR" },
{ 0x1A, "GETSPI" },
{ 0x1B, "GETWL" },
{ 0x1C, "GLBLU" },
{ 0x1D, "GRCHN" },
{ 0x1E, "GroupAuthFunc" },
{ 0x1F, "GroupFeat" },
{ 0x20, "GroupMgmtFunc" },
{ 0x21, "GroupUseFunc" },
{ 0x22, "IMAuthFunc" },
{ 0x23, "IMFeat" },
{ 0x24, "IMReceiveFunc" },
{ 0x25, "IMSendFunc" },
{ 0x26, "INVIT" },
{ 0x27, "InviteFunc" },
{ 0x28, "MBRAC" },
{ 0x29, "MCLS" },
{ 0x2A, "MDELIV" },
{ 0x2B, "NEWM" },
{ 0x2C, "NOTIF" },
{ 0x2D, "PresenceAuthFunc" },
{ 0x2E, "PresenceDeliverFunc" },
{ 0x2F, "PresenceFeat" },
{ 0x30, "REACT" },
{ 0x31, "REJCM" },
{ 0x32, "REJEC" },
{ 0x33, "RMVGM" },
{ 0x34, "SearchFunc" },
{ 0x35, "ServiceFunc" },
{ 0x36, "SETD" },
{ 0x37, "SETGP" },
{ 0x38, "SRCH" },
{ 0x39, "STSRC" },
{ 0x3A, "SUBGCN" },
{ 0x3B, "UPDPR" },
{ 0x3C, "WVCSPFeat" },
{ 0x00, NULL }
};
/* Client capability code page (0x03) */
static const value_string wbxml_wv_csp_10_tags_cp3[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AcceptedCharset" },
{ 0x06, "AcceptedContentLength" },
{ 0x07, "AcceptedContentType" },
{ 0x08, "AcceptedTransferEncoding" },
{ 0x09, "AnyContent" },
{ 0x0A, "ClientType" },
{ 0x0B, "InitialDeliveryMethod" },
{ 0x0C, "MultiTrans" },
{ 0x0D, "ParserSize" },
{ 0x0E, "ServerPollMin" },
{ 0x0F, "SupportedBearer" },
{ 0x10, "SupportedCIRMethod" },
{ 0x11, "TCPAddress" },
{ 0x12, "TCPPort" },
{ 0x13, "UDPPort" },
{ 0x00, NULL }
};
/* Presence primitive code page (0x04) */
static const value_string wbxml_wv_csp_10_tags_cp4[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CancelAuth-Request" },
{ 0x06, "ContactListProperties" },
{ 0x07, "CreateAttributeList-Request" },
{ 0x08, "CreateList-Request" },
{ 0x09, "DefaultAttributeList" },
{ 0x0A, "DefaultContactList" },
{ 0x0B, "DefaultList" },
{ 0x0C, "DeleteAttributeList-Request" },
{ 0x0D, "DeleteList-Request" },
{ 0x0E, "GetAttributeList-Request" },
{ 0x0F, "GetAttributeList-Response" },
{ 0x10, "GetList-Request" },
{ 0x11, "GetList-Response" },
{ 0x12, "GetPresence-Request" },
{ 0x13, "GetPresence-Response" },
{ 0x14, "GetWatcherList-Request" },
{ 0x15, "GetWatcherList-Response" },
{ 0x16, "ListManage-Request" },
{ 0x17, "ListManage-Response" },
{ 0x18, "Presence" },
{ 0x19, "PresenceAuth-Request" },
{ 0x1A, "PresenceAuth-Response" },
{ 0x1B, "PresenceNotification-Request" },
{ 0x1C, "PresenceValueList" },
{ 0x1D, "SubscribePresence-Request" },
{ 0x1E, "UnsubscribePresence-Request" },
{ 0x1F, "UpdatePresence-Request" },
{ 0x00, NULL }
};
/* Presence attribute code page (0x05) */
static const value_string wbxml_wv_csp_10_tags_cp5[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Accuracy" },
{ 0x06, "Address" },
{ 0x07, "AddrPref" },
{ 0x08, "Alias" },
{ 0x09, "Altitude" },
{ 0x0A, "Building" },
{ 0x0B, "CAddr" },
{ 0x0C, "City" },
{ 0x0D, "ClientInfo" },
{ 0x0E, "ClientProducer" },
{ 0x0F, "ClientType" },
{ 0x10, "ClientVersion" },
{ 0x11, "CommC" },
{ 0x12, "CommCap" },
{ 0x13, "ContactInfo" },
{ 0x14, "ContainedvCard" },
{ 0x15, "Country" },
{ 0x16, "Crossing1" },
{ 0x17, "Crossing2" },
{ 0x18, "DevManufacturer" },
{ 0x19, "DirectContent" },
{ 0x1A, "FreeTextLocation" },
{ 0x1B, "GeoLocation" },
{ 0x1C, "Language" },
{ 0x1D, "Latitude" },
{ 0x1E, "Longitude" },
{ 0x1F, "Model" },
{ 0x20, "NamedArea" },
{ 0x21, "OnlineStatus" },
{ 0x22, "PLMN" },
{ 0x23, "PrefC" },
{ 0x24, "PreferredContacts" },
{ 0x25, "PreferredLanguage" },
{ 0x26, "ReferredContent" },
{ 0x27, "ReferredvCard" },
{ 0x28, "Registration" },
{ 0x29, "StatusContent" },
{ 0x2A, "StatusMood" },
{ 0x2B, "StatusText" },
{ 0x2C, "Street" },
{ 0x2D, "TimeZone" },
{ 0x2E, "UserAvailability" },
{ 0x00, NULL }
};
/* Messaging code page (0x06) */
static const value_string wbxml_wv_csp_10_tags_cp6[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "BlockList" },
{ 0x06, "BlockUser-Request" },
{ 0x07, "DeliveryMethod" },
{ 0x08, "DeliveryReport" },
{ 0x09, "DeliveryReport-Request" },
{ 0x0A, "ForwardMessage-Request" },
{ 0x0B, "GetBlockedList-Request" },
{ 0x0C, "GetBlockedList-Response" },
{ 0x0D, "GetMessageList-Request" },
{ 0x0E, "GetMessageList-Response" },
{ 0x0F, "GetMessage-Request" },
{ 0x10, "GetMessage-Response" },
{ 0x11, "GrantList" },
{ 0x12, "MessageDelivered" },
{ 0x13, "MessageInfo" },
{ 0x14, "MessageNotification" },
{ 0x15, "NewMessage" },
{ 0x16, "RejectMessage-Request" },
{ 0x17, "SendMessage-Request" },
{ 0x18, "SendMessage-Response" },
{ 0x19, "SetDeliveryMethod-Request" },
{ 0x00, NULL }
};
/* Group code page (0x07) */
static const value_string wbxml_wv_csp_10_tags_cp7[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AddGroupMembers-Request" },
{ 0x06, "Admin" },
{ 0x07, "CreateGroup-Request" },
{ 0x08, "DeleteGroup-Request" },
{ 0x09, "GetGroupMembers-Request" },
{ 0x0A, "GetGroupMembers-Response" },
{ 0x0B, "GetGroupProps-Request" },
{ 0x0C, "GetGroupProps-Response" },
{ 0x0D, "GroupChangeNotice" },
{ 0x0E, "GroupProperties" },
{ 0x0F, "Joined" },
{ 0x10, "JoinedRequest" },
{ 0x11, "JoinGroup-Request" },
{ 0x12, "JoinGroup-Response" },
{ 0x13, "LeaveGroup-Request" },
{ 0x14, "LeaveGroup-Response" },
{ 0x15, "Left" },
{ 0x16, "MemberAccess-Request" },
{ 0x17, "Mod" },
{ 0x18, "OwnProperties" },
{ 0x19, "RejectList-Request" },
{ 0x1A, "RejectList-Response" },
{ 0x1B, "RemoveGroupMembers-Request" },
{ 0x1C, "SetGroupProps-Request" },
{ 0x1D, "SubscribeGroupNotice-Request" },
{ 0x1E, "SubscribeGroupNotice-Response" },
{ 0x1F, "Users" },
{ 0x20, "WelcomeNote" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/* Common code page (0x00) */
static const value_string wbxml_wv_csp_10_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "xmlns='http://www.wireless-village.org/CSP'" },
{ 0x06, "xmlns='http://www.wireless-village.org/PA'" },
{ 0x07, "xmlns='http://www.wireless-village.org/TRC'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/* Common value tokens (0x00) */
static const value_string wbxml_wv_csp_10_attrValue_cp0[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "AccessType" },
{ 0x86, "ActiveUsers" },
{ 0x87, "Admin" },
{ 0x88, "application/" },
{ 0x89, "application/vnd.wap.mms-message" },
{ 0x8A, "application/x-sms" },
{ 0x8B, "BASE64" },
{ 0x8C, "Closed" },
{ 0x8D, "Default" },
{ 0x8E, "DisplayName" },
{ 0x8F, "False (No)" },
{ 0x90, "Get" },
{ 0x91, "Group (GR)" },
{ 0x92, "http://" },
{ 0x93, "https://" },
{ 0x94, "image/" },
{ 0x95, "Inband" },
{ 0x96, "Instant Messaging (IM)" },
{ 0x97, "MaxActiveUsers" },
{ 0x98, "Mod" },
{ 0x99, "Name" },
{ 0x9A, "None" },
{ 0x9B, "Notify/Get" },
{ 0x9C, "Open" },
{ 0x9D, "Outband" },
{ 0x9E, "Presence (PR)" },
{ 0x9F, "Private" },
{ 0xA0, "PrivateMessaging" },
{ 0xA1, "PrivilegeLevel" },
{ 0xA2, "Public" },
{ 0xA3, "Push" },
{ 0xA4, "Request" },
{ 0xA5, "Response" },
{ 0xA6, "ScreenName" },
{ 0xA7, "Searchable" },
{ 0xA8, "Set" },
{ 0xA9, "Shared Content (SC)" },
{ 0xAA, "text/" },
{ 0xAB, "text/plain" },
{ 0xAC, "text/x-vCalendar" },
{ 0xAD, "text/x-vCard" },
{ 0xAE, "Topic" },
{ 0xAF, "True (Yes)" },
{ 0xB0, "Type" },
{ 0xB1, "Unset" },
{ 0xB2, "User (US)" },
{ 0xB3, "www.wireless-village.org" },
{ 0x00, NULL }
};
/* Access value tokens (0x01) */
static const value_string wbxml_wv_csp_10_attrValue_cp1[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "GROUP_ID" },
{ 0x86, "GROUP_NAME" },
{ 0x87, "GROUP_TOPIC" },
{ 0x88, "GROUP_USER_ID_JOINED" },
{ 0x89, "HTTP" },
{ 0x8A, "SMS" },
{ 0x8B, "STCP" },
{ 0x8C, "SUDP" },
{ 0x8D, "USER_ALIAS" },
{ 0x8E, "USER_EMAIL_ADDRESS" },
{ 0x8F, "USER_FIRST_NAME" },
{ 0x90, "USER_ID" },
{ 0x91, "USER_LAST_NAME" },
{ 0x92, "USER_MOBILE_NUMBER" },
{ 0x93, "WAPSMS" },
{ 0x94, "WAPUDP" },
{ 0x95, "WSP" },
{ 0x00, NULL }
};
/* Presence value tokens (0x05) */
static const value_string wbxml_wv_csp_10_attrValue_cp5[] = {
/* 0x80 -- 0x84 GLOBAL */
{ 0x85, "ANGRY" },
{ 0x86, "ANXIOUS" },
{ 0x87, "ASHAMED" },
{ 0x88, "AUDIO_CALL" },
{ 0x89, "AVAILABLE" },
{ 0x8A, "BORED" },
{ 0x8B, "CALL" },
{ 0x8C, "CLI" },
{ 0x8D, "COMPUTER" },
{ 0x8E, "DISCREET" },
{ 0x8F, "EMAIL" },
{ 0x90, "EXCITED" },
{ 0x91, "HAPPY" },
{ 0x92, "IM" },
{ 0x93, "IM_OFFLINE" },
{ 0x94, "IM_ONLINE" },
{ 0x95, "IN_LOVE" },
{ 0x96, "INVINCIBLE" },
{ 0x97, "JEALOUS" },
{ 0x98, "MMS" },
{ 0x99, "MOBILE_PHONE" },
{ 0x9A, "NOT_AVAILABLE" },
{ 0x9B, "OTHER" },
{ 0x9C, "PDA" },
{ 0x9D, "SAD" },
{ 0x9E, "SLEEPY" },
{ 0x9F, "SMS" },
{ 0xA0, "VIDEO_CALL" },
{ 0xA1, "VIDEO_STREAM" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static const value_valuestring wbxml_wv_csp_10_tags[] = {
{ 0, wbxml_wv_csp_10_tags_cp0 },
{ 1, wbxml_wv_csp_10_tags_cp1 },
{ 2, wbxml_wv_csp_10_tags_cp2 },
{ 3, wbxml_wv_csp_10_tags_cp3 },
{ 4, wbxml_wv_csp_10_tags_cp4 },
{ 5, wbxml_wv_csp_10_tags_cp5 },
{ 6, wbxml_wv_csp_10_tags_cp6 },
{ 7, wbxml_wv_csp_10_tags_cp7 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_10_attrStart[] = {
{ 0, wbxml_wv_csp_10_attrStart_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_10_attrValue[] = {
{ 0, wbxml_wv_csp_10_attrValue_cp0 },
{ 1, wbxml_wv_csp_10_attrValue_cp1 },
{ 5, wbxml_wv_csp_10_attrValue_cp5 },
{ 0, NULL }
};
static const wbxml_decoding decode_wv_cspc_10 = {
"Wireless-Village Client-Server Protocol 1.0",
"WV-CSP 1.0",
{ NULL, NULL, NULL },
wv_csp10_opaque_binary_tag,
wv_csp10_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
NULL,
wbxml_wv_csp_10_tags,
wbxml_wv_csp_10_attrStart,
wbxml_wv_csp_10_attrValue
};
/* WV-CSP 1.1
*
* Wireless Village Client Server Protocol
***************************************/
/***** Global extension tokens *****/
static const value_string wbxml_wv_csp_11_global_cp0[] = {
{ 0x80, "Common Value" }, /* EXT_T_0 */
{ 0x00, NULL }
};
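/* EXT_T_0 (token 0x80): the mb_u_int32 that follows is an index into
* vals_wv_csp_11_element_value_tokens below; see ext_t_0_wv_cspc_11(). */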
/***** Tag tokens *****/
/* Common code page */
static const value_string wbxml_wv_csp_11_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Acceptance" },
{ 0x06, "AddList" },
{ 0x07, "AddNickList" },
{ 0x08, "SName" }, /* Was: Attribute */
{ 0x09, "WV-CSP-Message" }, /* Was: AttributeList */
{ 0x0A, "ClientID" },
{ 0x0B, "Code" },
{ 0x0C, "ContactList" },
{ 0x0D, "ContentData" },
{ 0x0E, "ContentEncoding" },
{ 0x0F, "ContentSize" },
{ 0x10, "ContentType" },
{ 0x11, "DateTime" },
{ 0x12, "Description" },
{ 0x13, "DetailedResult" },
{ 0x14, "EntityList" },
{ 0x15, "Group" },
{ 0x16, "GroupID" },
{ 0x17, "GroupList" },
{ 0x18, "InUse" },
{ 0x19, "Logo" },
{ 0x1A, "MessageCount" },
{ 0x1B, "MessageID" },
{ 0x1C, "MessageURI" },
{ 0x1D, "MSISDN" },
{ 0x1E, "Name" },
{ 0x1F, "NickList" },
{ 0x20, "NickName" },
{ 0x21, "Poll" },
{ 0x22, "Presence" },
{ 0x23, "PresenceSubList" },
{ 0x24, "PresenceValue" },
{ 0x25, "Property" },
{ 0x26, "Qualifier" },
{ 0x27, "Recipient" },
{ 0x28, "RemoveList" },
{ 0x29, "RemoveNickList" },
{ 0x2A, "Result" },
{ 0x2B, "ScreenName" },
{ 0x2C, "Sender" },
{ 0x2D, "Session" },
{ 0x2E, "SessionDescriptor" },
{ 0x2F, "SessionID" },
{ 0x30, "SessionType" },
{ 0x31, "Status" },
{ 0x32, "Transaction" },
{ 0x33, "TransactionContent" },
{ 0x34, "TransactionDescriptor" },
{ 0x35, "TransactionID" },
{ 0x36, "TransactionMode" },
{ 0x37, "URL" },
{ 0x38, "URLList" },
{ 0x39, "User" },
{ 0x3A, "UserID" },
{ 0x3B, "UserList" },
{ 0x3C, "Validity" },
{ 0x3D, "Value" },
/* 0x3E - Removed: WV-CSP-Message */
{ 0x00, NULL }
};
/* Access code page */
static const value_string wbxml_wv_csp_11_tags_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AllFunctions" },
{ 0x06, "AllFunctionsRequest" },
{ 0x07, "CancelInvite-Request" },
{ 0x08, "CancelInviteUser-Request" },
{ 0x09, "Capability" },
{ 0x0A, "CapabilityList" },
{ 0x0B, "CapabilityRequest" },
{ 0x0C, "ClientCapability-Request" },
{ 0x0D, "ClientCapability-Response" },
{ 0x0E, "DigestBytes" },
{ 0x0F, "DigestSchema" },
{ 0x10, "Disconnect" },
{ 0x11, "Functions" },
{ 0x12, "GetSPInfo-Request" },
{ 0x13, "GetSPInfo-Response" },
{ 0x14, "InviteID" },
{ 0x15, "InviteNote" },
{ 0x16, "Invite-Request" },
{ 0x17, "Invite-Response" },
{ 0x18, "InviteType" },
{ 0x19, "InviteUser-Request" },
{ 0x1A, "InviteUser-Response" },
{ 0x1B, "KeepAlive-Request" },
{ 0x1C, "KeepAliveTime" },
{ 0x1D, "Login-Request" },
{ 0x1E, "Login-Response" },
{ 0x1F, "Logout-Request" },
{ 0x20, "Nonce" },
{ 0x21, "Password" },
{ 0x22, "Polling-Request" },
{ 0x23, "ResponseNote" },
{ 0x24, "SearchElement" },
{ 0x25, "SearchFindings" },
{ 0x26, "SearchID" },
{ 0x27, "SearchIndex" },
{ 0x28, "SearchLimit" },
{ 0x29, "KeepAlive-Response" },
{ 0x2A, "SearchPairList" },
{ 0x2B, "Search-Request" },
{ 0x2C, "Search-Response" },
{ 0x2D, "SearchResult" },
{ 0x2E, "Service-Request" },
{ 0x2F, "Service-Response" },
{ 0x30, "SessionCookie" },
{ 0x31, "StopSearch-Request" },
{ 0x32, "TimeToLive" },
/* New in WV-CSP 1.1 */
{ 0x33, "SearchString" },
{ 0x34, "CompletionFlag" },
{ 0x00, NULL }
};
/* Service code page */
/* Same as cp2 of WV-CSP 1.0 */
#define wbxml_wv_csp_11_tags_cp2 wbxml_wv_csp_10_tags_cp2
/* Client capability code page */
static const value_string wbxml_wv_csp_11_tags_cp3[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AcceptedCharset" },
{ 0x06, "AcceptedContentLength" },
{ 0x07, "AcceptedContentType" },
{ 0x08, "AcceptedTransferEncoding" },
{ 0x09, "AnyContent" },
{ 0x0A, "DefaultLanguage" }, /* Was: ClientType */
{ 0x0B, "InitialDeliveryMethod" },
{ 0x0C, "MultiTrans" },
{ 0x0D, "ParserSize" },
{ 0x0E, "ServerPollMin" },
{ 0x0F, "SupportedBearer" },
{ 0x10, "SupportedCIRMethod" },
{ 0x11, "TCPAddress" },
{ 0x12, "TCPPort" },
{ 0x13, "UDPPort" },
{ 0x00, NULL }
};
/* Presence primitive code page */
static const value_string wbxml_wv_csp_11_tags_cp4[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CancelAuth-Request" },
{ 0x06, "ContactListProperties" },
{ 0x07, "CreateAttributeList-Request" },
{ 0x08, "CreateList-Request" },
{ 0x09, "DefaultAttributeList" },
{ 0x0A, "DefaultContactList" },
{ 0x0B, "DefaultList" },
{ 0x0C, "DeleteAttributeList-Request" },
{ 0x0D, "DeleteList-Request" },
{ 0x0E, "GetAttributeList-Request" },
{ 0x0F, "GetAttributeList-Response" },
{ 0x10, "GetList-Request" },
{ 0x11, "GetList-Response" },
{ 0x12, "GetPresence-Request" },
{ 0x13, "GetPresence-Response" },
{ 0x14, "GetWatcherList-Request" },
{ 0x15, "GetWatcherList-Response" },
{ 0x16, "ListManage-Request" },
{ 0x17, "ListManage-Response" },
{ 0x18, "UnsubscribePresence-Request" }, /* Was: Presence */
{ 0x19, "PresenceAuth-Request" },
{ 0x1A, "PresenceAuth-User" }, /* Was: PresenceAuth-Response */
{ 0x1B, "PresenceNotification-Request" },
{ 0x1C, "UpdatePresence-Request" }, /* Was: PresenceValueList */
{ 0x1D, "SubscribePresence-Request" },
/* 0x1E - Removed: UnsubscribePresence-Request */
/* 0x1F - Removed: UpdatePresence-Request */
{ 0x00, NULL }
};
/* Presence attribute code page */
static const value_string wbxml_wv_csp_11_tags_cp5[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Accuracy" },
{ 0x06, "Address" },
{ 0x07, "AddrPref" },
{ 0x08, "Alias" },
{ 0x09, "Altitude" },
{ 0x0A, "Building" },
{ 0x0B, "Caddr" },
{ 0x0C, "City" },
{ 0x0D, "ClientInfo" },
{ 0x0E, "ClientProducer" },
{ 0x0F, "ClientType" },
{ 0x10, "ClientVersion" },
{ 0x11, "CommC" },
{ 0x12, "CommCap" },
{ 0x13, "ContactInfo" },
{ 0x14, "ContainedvCard" },
{ 0x15, "Country" },
{ 0x16, "Crossing1" },
{ 0x17, "Crossing2" },
{ 0x18, "DevManufacturer" },
{ 0x19, "DirectContent" },
{ 0x1A, "FreeTextLocation" },
{ 0x1B, "GeoLocation" },
{ 0x1C, "Language" },
{ 0x1D, "Latitude" },
{ 0x1E, "Longitude" },
{ 0x1F, "Model" },
{ 0x20, "NamedArea" },
{ 0x21, "OnlineStatus" },
{ 0x22, "PLMN" },
{ 0x23, "PrefC" },
{ 0x24, "PreferredContacts" },
{ 0x25, "PreferredLanguage" },
{ 0x26, "ReferredContent" },
{ 0x27, "ReferredvCard" },
{ 0x28, "Registration" },
{ 0x29, "StatusContent" },
{ 0x2A, "StatusMood" },
{ 0x2B, "StatusText" },
{ 0x2C, "Street" },
{ 0x2D, "TimeZone" },
{ 0x2E, "UserAvailability" },
/* New in WV-CSP 1.1 */
{ 0x2F, "Cap" },
{ 0x30, "Cname" },
{ 0x31, "Contact" },
{ 0x32, "Cpriority" },
{ 0x33, "Cstatus" },
{ 0x34, "Note" },
{ 0x35, "Zone" },
{ 0x00, NULL }
};
/* Messaging code page */
static const value_string wbxml_wv_csp_11_tags_cp6[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "BlockList" },
{ 0x06, "BlockUser-Request" },
{ 0x07, "DeliveryMethod" },
{ 0x08, "DeliveryReport" },
{ 0x09, "DeliveryReport-Request" },
{ 0x0A, "ForwardMessage-Request" },
{ 0x0B, "GetBlockedList-Request" },
{ 0x0C, "GetBlockedList-Response" },
{ 0x0D, "GetMessageList-Request" },
{ 0x0E, "GetMessageList-Response" },
{ 0x0F, "GetMessage-Request" },
{ 0x10, "GetMessage-Response" },
{ 0x11, "GrantList" },
{ 0x12, "MessageDelivered" },
{ 0x13, "MessageInfo" },
{ 0x14, "MessageNotification" },
{ 0x15, "NewMessage" },
{ 0x16, "RejectMessage-Request" },
{ 0x17, "SendMessage-Request" },
{ 0x18, "SendMessage-Response" },
{ 0x19, "SetDeliveryMethod-Request" },
/* New in WV-CSP 1.1 */
{ 0x1A, "DeliveryTime" },
{ 0x00, NULL }
};
/* Group code page */
static const value_string wbxml_wv_csp_11_tags_cp7[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AddGroupMembers-Request" },
{ 0x06, "Admin" },
{ 0x07, "CreateGroup-Request" },
{ 0x08, "DeleteGroup-Request" },
{ 0x09, "GetGroupMembers-Request" },
{ 0x0A, "GetGroupMembers-Response" },
{ 0x0B, "GetGroupProps-Request" },
{ 0x0C, "GetGroupProps-Response" },
{ 0x0D, "GroupChangeNotice" },
{ 0x0E, "GroupProperties" },
{ 0x0F, "Joined" },
{ 0x10, "JoinedRequest" },
{ 0x11, "JoinGroup-Request" },
{ 0x12, "JoinGroup-Response" },
{ 0x13, "LeaveGroup-Request" },
{ 0x14, "LeaveGroup-Response" },
{ 0x15, "Left" },
{ 0x16, "MemberAccess-Request" },
{ 0x17, "Mod" },
{ 0x18, "OwnProperties" },
{ 0x19, "RejectList-Request" },
{ 0x1A, "RejectList-Response" },
{ 0x1B, "RemoveGroupMembers-Request" },
{ 0x1C, "SetGroupProps-Request" },
{ 0x1D, "SubscribeGroupNotice-Request" },
{ 0x1E, "SubscribeGroupNotice-Response" },
{ 0x1F, "Users" },
{ 0x20, "WelcomeNote" },
/* New in WV-CSP 1.1 */
{ 0x21, "JoinGroup" },
{ 0x22, "SubscribeNotification" },
{ 0x23, "SubscribeType" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/* Common code page */
/* Same as cp0 of WV-CSP 1.0 */
#define wbxml_wv_csp_11_attrStart_cp0 wbxml_wv_csp_10_attrStart_cp0
/***** Attribute Value tokens *****/
/*
* Element value tokens
*
* NOTE - WV-CSP uses the EXT_T_0 token in a peculiar way: the mb_u_int32
* does *not* reference an offset in the string table; instead it is an
* index into the following value_string.
*
* Please note that:
* - Values 'T' and 'F' are Boolean values representing "True" and "False"
* (or "Yes" and "No" in some circumstances) respectively.
* - Values 'GR', 'IM', 'PR', 'SC', 'GM' and 'US' are enumerated values
* representing "Group", "Instant Messaging", "Presence", "Shared Content",
* "Group membership" and "User" respectively.
* - Values 'G', 'S' and 'U' are enumerated values representing "Get", "Set"
* and "Unset" respectively.
* - Values 'N' and 'P' are enumerated values representing "Notify/Get" and
* "Push" respectively.
*
* I repeat: this is NOT an attrValue[] array, hence it is not called
* wbxml_wv_XXX but vals_wv_XXX.
*
* Result: the attribute value token definitions from WV-CSP 1.0 are dropped.
*/
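/*
* Worked example (illustrative): the byte sequence 0x80 0x2C in element
* content is EXT_T_0 followed by mb_u_int32 0x2C; index 0x2C selects
* the entry "T" below, i.e. the Boolean value True.
*/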
static const value_string vals_wv_csp_11_element_value_tokens[] = {
/*
* Common value tokens
*/
{ 0x00, "AccessType" },
{ 0x01, "ActiveUsers" },
{ 0x02, "Admin" },
{ 0x03, "application/" },
{ 0x04, "application/vnd.wap.mms-message" },
{ 0x05, "application/x-sms" },
{ 0x06, "AutoJoin" },
{ 0x07, "BASE64" },
{ 0x08, "Closed" },
{ 0x09, "Default" },
{ 0x0A, "DisplayName" },
{ 0x0B, "F" },
{ 0x0C, "G" },
{ 0x0D, "GR" },
{ 0x0E, "http://" },
{ 0x0F, "https://" },
{ 0x10, "image/" },
{ 0x11, "Inband" },
{ 0x12, "IM" },
{ 0x13, "MaxActiveUsers" },
{ 0x14, "Mod" },
{ 0x15, "Name" },
{ 0x16, "None" },
{ 0x17, "N" },
{ 0x18, "Open" },
{ 0x19, "Outband" },
{ 0x1A, "PR" },
{ 0x1B, "Private" },
{ 0x1C, "PrivateMessaging" },
{ 0x1D, "PrivilegeLevel" },
{ 0x1E, "Public" },
{ 0x1F, "P" },
{ 0x20, "Request" },
{ 0x21, "Response" },
{ 0x22, "Restricted" },
{ 0x23, "ScreenName" },
{ 0x24, "Searchable" },
{ 0x25, "S" },
{ 0x26, "SC" },
{ 0x27, "text/" },
{ 0x28, "text/plain" },
{ 0x29, "text/x-vCalendar" },
{ 0x2A, "text/x-vCard" },
{ 0x2B, "Topic" },
{ 0x2C, "T" },
{ 0x2D, "Type" },
{ 0x2E, "U" },
{ 0x2F, "US" },
{ 0x30, "www.wireless-village.org" },
/*
* Access value tokens
*/
{ 0x3D, "GROUP_ID" },
{ 0x3E, "GROUP_NAME" },
{ 0x3F, "GROUP_TOPIC" },
{ 0x40, "GROUP_USER_ID_JOINED" },
{ 0x41, "GROUP_USER_ID_OWNER" },
{ 0x42, "HTTP" },
{ 0x43, "SMS" },
{ 0x44, "STCP" },
{ 0x45, "SUDP" },
{ 0x46, "USER_ALIAS" },
{ 0x47, "USER_EMAIL_ADDRESS" },
{ 0x48, "USER_FIRST_NAME" },
{ 0x49, "USER_ID" },
{ 0x4A, "USER_LAST_NAME" },
{ 0x4B, "USER_MOBILE_NUMBER" },
{ 0x4C, "USER_ONLINE_STATUS" },
{ 0x4D, "WAPSMS" },
{ 0x4E, "WAPUDP" },
{ 0x4F, "WSP" },
/*
* Presence value tokens
*/
{ 0x5B, "ANGRY" },
{ 0x5C, "ANXIOUS" },
{ 0x5D, "ASHAMED" },
{ 0x5E, "AUDIO_CALL" },
{ 0x5F, "AVAILABLE" },
{ 0x60, "BORED" },
{ 0x61, "CALL" },
{ 0x62, "CLI" },
{ 0x63, "COMPUTER" },
{ 0x64, "DISCREET" },
{ 0x65, "EMAIL" },
{ 0x66, "EXCITED" },
{ 0x67, "HAPPY" },
{ 0x68, "IM" },
{ 0x69, "IM_OFFLINE" },
{ 0x6A, "IM_ONLINE" },
{ 0x6B, "IN_LOVE" },
{ 0x6C, "INVINCIBLE" },
{ 0x6D, "JEALOUS" },
{ 0x6E, "MMS" },
{ 0x6F, "MOBILE_PHONE" },
{ 0x70, "NOT_AVAILABLE" },
{ 0x71, "OTHER" },
{ 0x72, "PDA" },
{ 0x73, "SAD" },
{ 0x74, "SLEEPY" },
{ 0x75, "SMS" },
{ 0x76, "VIDEO_CALL" },
{ 0x77, "VIDEO_STREAM" },
{ 0x00, NULL }
};
static value_string_ext vals_wv_csp_11_element_value_tokens_ext = VALUE_STRING_EXT_INIT(vals_wv_csp_11_element_value_tokens);
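/* VALUE_STRING_EXT_INIT wraps the table in a value_string_ext so that
* val_to_str_ext() can avoid a plain linear scan on a table of this
* size. */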
/***** Token code page aggregation *****/
static char *
ext_t_0_wv_cspc_11(tvbuff_t *tvb _U_, guint32 value, guint32 str_tbl _U_)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Common Value: '%s'",
val_to_str_ext(value, &vals_wv_csp_11_element_value_tokens_ext,
"<Unknown WV-CSP 1.1 Common Value token 0x%X>"));
return str;
}
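/* The handler above is installed in the first ext_t[] slot of
* decode_wv_cspc_11 below, so EXT_T_0 tokens in WV-CSP 1.1 documents
* are rendered as "Common Value: '<token name>'". */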
static const value_valuestring wbxml_wv_csp_11_global[] = {
{ 0, wbxml_wv_csp_11_global_cp0 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_11_tags[] = {
{ 0, wbxml_wv_csp_11_tags_cp0 },
{ 1, wbxml_wv_csp_11_tags_cp1 },
{ 2, wbxml_wv_csp_11_tags_cp2 },
{ 3, wbxml_wv_csp_11_tags_cp3 },
{ 4, wbxml_wv_csp_11_tags_cp4 },
{ 5, wbxml_wv_csp_11_tags_cp5 },
{ 6, wbxml_wv_csp_11_tags_cp6 },
{ 7, wbxml_wv_csp_11_tags_cp7 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_11_attrStart[] = {
{ 0, wbxml_wv_csp_11_attrStart_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_wv_cspc_11 = {
"Wireless-Village Client-Server Protocol 1.1",
"WV-CSP 1.1",
{ ext_t_0_wv_cspc_11, NULL, NULL },
wv_csp11_opaque_binary_tag,
wv_csp11_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wv_csp_11_global,
wbxml_wv_csp_11_tags,
wbxml_wv_csp_11_attrStart,
NULL
};
/* WV-CSP 1.2
*
* Wireless Village Client Server Protocol
***************************************/
/***** Global extension tokens *****/
/* Same as WV-CSP 1.1 */
/***** Tag tokens *****/
/* Common code page */
/* Same as cp0 of WV-CSP 1.1 */
#define wbxml_wv_csp_12_tags_cp0 wbxml_wv_csp_11_tags_cp0
/* Note that the table continues in code page 0x09 */
/* Access code page (0x01) */
static const value_string wbxml_wv_csp_12_tags_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AllFunctions" },
{ 0x06, "AllFunctionsRequest" },
{ 0x07, "CancelInvite-Request" },
{ 0x08, "CancelInviteUser-Request" },
{ 0x09, "Capability" },
{ 0x0A, "CapabilityList" },
{ 0x0B, "CapabilityRequest" },
{ 0x0C, "ClientCapability-Request" },
{ 0x0D, "ClientCapability-Response" },
{ 0x0E, "DigestBytes" },
{ 0x0F, "DigestSchema" },
{ 0x10, "Disconnect" },
{ 0x11, "Functions" },
{ 0x12, "GetSPInfo-Request" },
{ 0x13, "GetSPInfo-Response" },
{ 0x14, "InviteID" },
{ 0x15, "InviteNote" },
{ 0x16, "Invite-Request" },
{ 0x17, "Invite-Response" },
{ 0x18, "InviteType" },
{ 0x19, "InviteUser-Request" },
{ 0x1A, "InviteUser-Response" },
{ 0x1B, "KeepAlive-Request" },
{ 0x1C, "KeepAliveTime" },
{ 0x1D, "Login-Request" },
{ 0x1E, "Login-Response" },
{ 0x1F, "Logout-Request" },
{ 0x20, "Nonce" },
{ 0x21, "Password" },
{ 0x22, "Polling-Request" },
{ 0x23, "ResponseNote" },
{ 0x24, "SearchElement" },
{ 0x25, "SearchFindings" },
{ 0x26, "SearchID" },
{ 0x27, "SearchIndex" },
{ 0x28, "SearchLimit" },
{ 0x29, "KeepAlive-Response" },
{ 0x2A, "SearchPairList" },
{ 0x2B, "Search-Request" },
{ 0x2C, "Search-Response" },
{ 0x2D, "SearchResult" },
{ 0x2E, "Service-Request" },
{ 0x2F, "Service-Response" },
{ 0x30, "SessionCookie" },
{ 0x31, "StopSearch-Request" },
{ 0x32, "TimeToLive" },
/* New in WV-CSP 1.1 */
{ 0x33, "SearchString" },
{ 0x34, "CompletionFlag" },
/* New in WV-CSP 1.2 */
{ 0x36, "ReceiveList" },
{ 0x37, "VerifyID-Request" },
{ 0x38, "Extended-Request" },
{ 0x39, "Extended-Response" },
{ 0x3A, "AgreedCapabilityList" },
{ 0x3B, "ExtendedData" },
{ 0x3C, "OtherServer" },
{ 0x3D, "PresenceAttributeNSName" },
{ 0x3E, "SessionNSName" },
{ 0x3F, "TransactionNSName" },
{ 0x00, NULL }
};
/* Note that the table continues in code page 0x0A */
/* Service code page (0x02) */
static const value_string wbxml_wv_csp_12_tags_cp2[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ADDGM" },
{ 0x06, "AttListFunc" },
{ 0x07, "BLENT" },
{ 0x08, "CAAUT" },
{ 0x09, "CAINV" },
{ 0x0A, "CALI" },
{ 0x0B, "CCLI" },
{ 0x0C, "ContListFunc" },
{ 0x0D, "CREAG" },
{ 0x0E, "DALI" },
{ 0x0F, "DCLI" },
{ 0x10, "DELGR" },
{ 0x11, "FundamentalFeat" },
{ 0x12, "FWMSG" },
{ 0x13, "GALS" },
{ 0x14, "GCLI" },
{ 0x15, "GETGM" },
{ 0x16, "GETGP" },
{ 0x17, "GETLM" },
{ 0x18, "GETM" },
{ 0x19, "GETPR" },
{ 0x1A, "GETSPI" },
{ 0x1B, "GETWL" },
{ 0x1C, "GLBLU" },
{ 0x1D, "GRCHN" },
{ 0x1E, "GroupAuthFunc" },
{ 0x1F, "GroupFeat" },
{ 0x20, "GroupMgmtFunc" },
{ 0x21, "GroupUseFunc" },
{ 0x22, "IMAuthFunc" },
{ 0x23, "IMFeat" },
{ 0x24, "IMReceiveFunc" },
{ 0x25, "IMSendFunc" },
{ 0x26, "INVIT" },
{ 0x27, "InviteFunc" },
{ 0x28, "MBRAC" },
{ 0x29, "MCLS" },
{ 0x2A, "MDELIV" },
{ 0x2B, "NEWM" },
{ 0x2C, "NOTIF" },
{ 0x2D, "PresenceAuthFunc" },
{ 0x2E, "PresenceDeliverFunc" },
{ 0x2F, "PresenceFeat" },
{ 0x30, "REACT" },
{ 0x31, "REJCM" },
{ 0x32, "REJEC" },
{ 0x33, "RMVGM" },
{ 0x34, "SearchFunc" },
{ 0x35, "ServiceFunc" },
{ 0x36, "SETD" },
{ 0x37, "SETGP" },
{ 0x38, "SRCH" },
{ 0x39, "STSRC" },
{ 0x3A, "SUBGCN" },
{ 0x3B, "UPDPR" },
{ 0x3C, "WVCSPFeat" },
/* New in WV-CSP 1.2 */
{ 0x3D, "MF" },
{ 0x3E, "MG" },
{ 0x3F, "MM" },
{ 0x00, NULL }
};
/* Note that the table continues in code page 0x08 */
/* Client capability code page (0x03) */
static const value_string wbxml_wv_csp_12_tags_cp3[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AcceptedCharset" },
{ 0x06, "AcceptedContentLength" },
{ 0x07, "AcceptedContentType" },
{ 0x08, "AcceptedTransferEncoding" },
{ 0x09, "AnyContent" },
{ 0x0A, "DefaultLanguage" },
{ 0x0B, "InitialDeliveryMethod" },
{ 0x0C, "MultiTrans" },
{ 0x0D, "ParserSize" },
{ 0x0E, "ServerPollMin" },
{ 0x0F, "SupportedBearer" },
{ 0x10, "SupportedCIRMethod" },
{ 0x11, "TCPAddress" },
{ 0x12, "TCPPort" },
{ 0x13, "UDPPort" },
{ 0x14, "CIRURL" },
{ 0x00, NULL }
};
/* Presence primitive code page (0x04) */
static const value_string wbxml_wv_csp_12_tags_cp4[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CancelAuth-Request" },
{ 0x06, "ContactListProperties" },
{ 0x07, "CreateAttributeList-Request" },
{ 0x08, "CreateList-Request" },
{ 0x09, "DefaultAttributeList" },
{ 0x0A, "DefaultContactList" },
{ 0x0B, "DefaultList" },
{ 0x0C, "DeleteAttributeList-Request" },
{ 0x0D, "DeleteList-Request" },
{ 0x0E, "GetAttributeList-Request" },
{ 0x0F, "GetAttributeList-Response" },
{ 0x10, "GetList-Request" },
{ 0x11, "GetList-Response" },
{ 0x12, "GetPresence-Request" },
{ 0x13, "GetPresence-Response" },
{ 0x14, "GetWatcherList-Request" },
{ 0x15, "GetWatcherList-Response" },
{ 0x16, "ListManage-Request" },
{ 0x17, "ListManage-Response" },
{ 0x18, "UnsubscribePresence-Request" },
{ 0x19, "PresenceAuth-Request" },
{ 0x1A, "PresenceAuth-User" },
{ 0x1B, "PresenceNotification-Request" },
{ 0x1C, "UpdatePresence-Request" },
{ 0x1D, "SubscribePresence-Request" },
/* New in WV-CSP 1.2 */
{ 0x1E, "Auto-Subscribe" },
/* 0x1E was defined in WV-CSP 1.0: UnsubscribePresence-Request */
{ 0x1F, "GetReactiveAuthStatus-Request" },
/* 0x1F was defined in WV-CSP 1.0: UpdatePresence-Request */
{ 0x20, "GetReactiveAuthStatus-Response" },
{ 0x00, NULL }
};
/* Presence attribute code page (0x05) */
static const value_string wbxml_wv_csp_12_tags_cp5[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Accuracy" },
{ 0x06, "Address" },
{ 0x07, "AddrPref" },
{ 0x08, "Alias" },
{ 0x09, "Altitude" },
{ 0x0A, "Building" },
{ 0x0B, "Caddr" },
{ 0x0C, "City" },
{ 0x0D, "ClientInfo" },
{ 0x0E, "ClientProducer" },
{ 0x0F, "ClientType" },
{ 0x10, "ClientVersion" },
{ 0x11, "CommC" },
{ 0x12, "CommCap" },
{ 0x13, "ContactInfo" },
{ 0x14, "ContainedvCard" },
{ 0x15, "Country" },
{ 0x16, "Crossing1" },
{ 0x17, "Crossing2" },
{ 0x18, "DevManufacturer" },
{ 0x19, "DirectContent" },
{ 0x1A, "FreeTextLocation" },
{ 0x1B, "GeoLocation" },
{ 0x1C, "Language" },
{ 0x1D, "Latitude" },
{ 0x1E, "Longitude" },
{ 0x1F, "Model" },
{ 0x20, "NamedArea" },
{ 0x21, "OnlineStatus" },
{ 0x22, "PLMN" },
{ 0x23, "PrefC" },
{ 0x24, "PreferredContacts" },
{ 0x25, "PreferredLanguage" },
{ 0x26, "ReferredContent" },
{ 0x27, "ReferredvCard" },
{ 0x28, "Registration" },
{ 0x29, "StatusContent" },
{ 0x2A, "StatusMood" },
{ 0x2B, "StatusText" },
{ 0x2C, "Street" },
{ 0x2D, "TimeZone" },
{ 0x2E, "UserAvailability" },
/* New in WV-CSP 1.1 */
{ 0x2F, "Cap" },
{ 0x30, "Cname" },
{ 0x31, "Contact" },
{ 0x32, "Cpriority" },
{ 0x33, "Cstatus" },
{ 0x34, "Note" },
{ 0x35, "Zone" },
/* New in WV-CSP 1.2 */
{ 0x36, "ContentType" },
{ 0x37, "Inf_link" },
{ 0x38, "InfoLink" },
{ 0x39, "Link" },
{ 0x3A, "Text" },
{ 0x00, NULL }
};
/* Messaging code page (0x06) */
static const value_string wbxml_wv_csp_12_tags_cp6[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "BlockList" },
{ 0x06, "BlockEntity-Request" }, /* Was: BlockUser-Request */
{ 0x07, "DeliveryMethod" },
{ 0x08, "DeliveryReport" },
{ 0x09, "DeliveryReport-Request" },
{ 0x0A, "ForwardMessage-Request" },
{ 0x0B, "GetBlockedList-Request" },
{ 0x0C, "GetBlockedList-Response" },
{ 0x0D, "GetMessageList-Request" },
{ 0x0E, "GetMessageList-Response" },
{ 0x0F, "GetMessage-Request" },
{ 0x10, "GetMessage-Response" },
{ 0x11, "GrantList" },
{ 0x12, "MessageDelivered" },
{ 0x13, "MessageInfo" },
{ 0x14, "MessageNotification" },
{ 0x15, "NewMessage" },
{ 0x16, "RejectMessage-Request" },
{ 0x17, "SendMessage-Request" },
{ 0x18, "SendMessage-Response" },
{ 0x19, "SetDeliveryMethod-Request" },
{ 0x1A, "DeliveryTime" },
{ 0x00, NULL }
};
/* Group code page (0x07) */
static const value_string wbxml_wv_csp_12_tags_cp7[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AddGroupMembers-Request" },
{ 0x06, "Admin" },
{ 0x07, "CreateGroup-Request" },
{ 0x08, "DeleteGroup-Request" },
{ 0x09, "GetGroupMembers-Request" },
{ 0x0A, "GetGroupMembers-Response" },
{ 0x0B, "GetGroupProps-Request" },
{ 0x0C, "GetGroupProps-Response" },
{ 0x0D, "GroupChangeNotice" },
{ 0x0E, "GroupProperties" },
{ 0x0F, "Joined" },
{ 0x10, "JoinedRequest" },
{ 0x11, "JoinGroup-Request" },
{ 0x12, "JoinGroup-Response" },
{ 0x13, "LeaveGroup-Request" },
{ 0x14, "LeaveGroup-Response" },
{ 0x15, "Left" },
{ 0x16, "MemberAccess-Request" },
{ 0x17, "Mod" },
{ 0x18, "OwnProperties" },
{ 0x19, "RejectList-Request" },
{ 0x1A, "RejectList-Response" },
{ 0x1B, "RemoveGroupMembers-Request" },
{ 0x1C, "SetGroupProps-Request" },
{ 0x1D, "SubscribeGroupNotice-Request" },
{ 0x1E, "SubscribeGroupNotice-Response" },
{ 0x1F, "Users" },
{ 0x20, "WelcomeNote" },
/* New in WV-CSP 1.1 */
{ 0x21, "JoinGroup" },
{ 0x22, "SubscribeNotification" },
{ 0x23, "SubscribeType" },
/* New in WV-CSP 1.2 */
{ 0x24, "GetJoinedUsers-Request" },
{ 0x25, "GetJoinedUsers-Response" },
{ 0x26, "AdminMapList" },
{ 0x27, "AdminMapping" },
{ 0x28, "Mapping" },
{ 0x29, "ModMapping" },
{ 0x2A, "UserMapList" },
{ 0x2B, "UserMapping" },
{ 0x00, NULL }
};
/* Service negotiation code page - continued (0x08) */
static const value_string wbxml_wv_csp_12_tags_cp8[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "MP" },
{ 0x06, "GETAUT" },
{ 0x07, "GETJU" },
{ 0x08, "VRID" },
{ 0x09, "VerifyIDFunc" },
{ 0x00, NULL }
};
/* Common code page - continued (0x09) */
static const value_string wbxml_wv_csp_12_tags_cp9[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "CIR" },
{ 0x06, "Domain" },
{ 0x07, "ExtBlock" },
{ 0x08, "HistoryPeriod" },
{ 0x09, "IDList" },
{ 0x0A, "MaxWatcherList" },
{ 0x0B, "ReactiveAuthState" },
{ 0x0C, "ReactiveAuthStatus" },
{ 0x0D, "ReactiveAuthStatusList" },
{ 0x0E, "Watcher" },
{ 0x0F, "WatcherStatus" },
{ 0x00, NULL }
};
/* Access code page - continued (0x0A) */
static const value_string wbxml_wv_csp_12_tags_cp10[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "WV-CSP-NSDiscovery-Request" },
{ 0x06, "WV-CSP-NSDiscovery-Response" },
{ 0x07, "VersionList" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/* Common code page (0x00) */
static const value_string wbxml_wv_csp_12_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "xmlns='http://www.wireless-village.org/CSP'" },
{ 0x06, "xmlns='http://www.wireless-village.org/PA'" },
{ 0x07, "xmlns='http://www.wireless-village.org/TRC'" },
/* New in WV-CSP 1.2 */
{ 0x08, "xmlns='http://www.openmobilealliance.org/DTD/WV-CSP'" },
{ 0x09, "xmlns='http://www.openmobilealliance.org/DTD/WV-PA'" },
{ 0x0A, "xmlns http://www.openmobilealliance.org/DTD/WV-TRC'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/*
* Element value tokens
*
* NOTE - WV-CSP uses the EXT_T_0 token in a peculiar way: the mb_u_int32
* does *not* reference an offset in the string table; instead it is an
* index into the following value_string.
*
* Please note that:
* - Values 'T' and 'F' are Boolean values representing "True" and "False"
* (or "Yes" and "No" in some circumstances) respectively.
* - Values 'GR', 'IM', 'PR', 'SC', 'GM' and 'US' are enumerated values
* representing "Group", "Instant Messaging", "Presence", "Shared Content",
* "Group membership" and "User" respectively.
* - Values 'G', 'S' and 'U' are enumerated values representing "Get", "Set"
* and "Unset" respectively.
* - Values 'N' and 'P' are enumerated values representing "Notify/Get" and
* "Push" respectively.
*
* I repeat: this is NOT an attrValue[] array, hence it is not called
* wbxml_wv_XXX but vals_wv_XXX.
*/
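/* For illustration (a hypothetical token stream, not taken from a capture):
* the two bytes 0x80 0x0B are EXT_T_0 followed by the mb_u_int32 value 0x0B,
* which the table below renders as Common Value: 'F' (Boolean "False")
* rather than as a string table reference.
*/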
static const value_string vals_wv_csp_12_element_value_tokens[] = {
/*
* Common value tokens
*/
{ 0x00, "AccessType" },
{ 0x01, "ActiveUsers" },
{ 0x02, "Admin" },
{ 0x03, "application/" },
{ 0x04, "application/vnd.wap.mms-message" },
{ 0x05, "application/x-sms" },
{ 0x06, "AutoJoin" },
{ 0x07, "BASE64" },
{ 0x08, "Closed" },
{ 0x09, "Default" },
{ 0x0A, "DisplayName" },
{ 0x0B, "F" },
{ 0x0C, "G" },
{ 0x0D, "GR" },
{ 0x0E, "http://" },
{ 0x0F, "https://" },
{ 0x10, "image/" },
{ 0x11, "Inband" },
{ 0x12, "IM" },
{ 0x13, "MaxActiveUsers" },
{ 0x14, "Mod" },
{ 0x15, "Name" },
{ 0x16, "None" },
{ 0x17, "N" },
{ 0x18, "Open" },
{ 0x19, "Outband" },
{ 0x1A, "PR" },
{ 0x1B, "Private" },
{ 0x1C, "PrivateMessaging" },
{ 0x1D, "PrivilegeLevel" },
{ 0x1E, "Public" },
{ 0x1F, "P" },
{ 0x20, "Request" },
{ 0x21, "Response" },
{ 0x22, "Restricted" },
{ 0x23, "ScreenName" },
{ 0x24, "Searchable" },
{ 0x25, "S" },
{ 0x26, "SC" },
{ 0x27, "text/" },
{ 0x28, "text/plain" },
{ 0x29, "text/x-vCalendar" },
{ 0x2A, "text/x-vCard" },
{ 0x2B, "Topic" },
{ 0x2C, "T" },
{ 0x2D, "Type" },
{ 0x2E, "U" },
{ 0x2F, "US" },
{ 0x30, "www.wireless-village.org" },
/* New in WV-CSP 1.2 */
{ 0x31, "AutoDelete" },
{ 0x32, "GM" },
{ 0x33, "Validity" },
{ 0x34, "DENIED" },
{ 0x35, "GRANTED" },
{ 0x36, "PENDING" },
{ 0x37, "ShowID" },
/*
* Access value tokens
*/
{ 0x3D, "GROUP_ID" },
{ 0x3E, "GROUP_NAME" },
{ 0x3F, "GROUP_TOPIC" },
{ 0x40, "GROUP_USER_ID_JOINED" },
{ 0x41, "GROUP_USER_ID_OWNER" },
{ 0x42, "HTTP" },
{ 0x43, "SMS" },
{ 0x44, "STCP" },
{ 0x45, "SUDP" },
{ 0x46, "USER_ALIAS" },
{ 0x47, "USER_EMAIL_ADDRESS" },
{ 0x48, "USER_FIRST_NAME" },
{ 0x49, "USER_ID" },
{ 0x4A, "USER_LAST_NAME" },
{ 0x4B, "USER_MOBILE_NUMBER" },
{ 0x4C, "USER_ONLINE_STATUS" },
{ 0x4D, "WAPSMS" },
{ 0x4E, "WAPUDP" },
{ 0x4F, "WSP" },
/* New in WV-CSP 1.2 */
{ 0x50, "GROUP_USER_ID_AUTOJOIN" },
/*
* Presence value tokens
*/
{ 0x5B, "ANGRY" },
{ 0x5C, "ANXIOUS" },
{ 0x5D, "ASHAMED" },
{ 0x5E, "AUDIO_CALL" },
{ 0x5F, "AVAILABLE" },
{ 0x60, "BORED" },
{ 0x61, "CALL" },
{ 0x62, "CLI" },
{ 0x63, "COMPUTER" },
{ 0x64, "DISCREET" },
{ 0x65, "EMAIL" },
{ 0x66, "EXCITED" },
{ 0x67, "HAPPY" },
/* { 0x68, "IM" }, Obsolete */
{ 0x69, "IM_OFFLINE" },
{ 0x6A, "IM_ONLINE" },
{ 0x6B, "IN_LOVE" },
{ 0x6C, "INVINCIBLE" },
{ 0x6D, "JEALOUS" },
{ 0x6E, "MMS" },
{ 0x6F, "MOBILE_PHONE" },
{ 0x70, "NOT_AVAILABLE" },
{ 0x71, "OTHER" },
{ 0x72, "PDA" },
{ 0x73, "SAD" },
{ 0x74, "SLEEPY" },
{ 0x75, "SMS" },
{ 0x76, "VIDEO_CALL" },
{ 0x77, "VIDEO_STREAM" },
/*
* Access value tokens - continued
*/
{ 0xA4, "SSMS" },
{ 0xA5, "SHTTP" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static char *
ext_t_0_wv_cspc_12(tvbuff_t *tvb _U_, guint32 value, guint32 str_tbl _U_)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Common Value: '%s'",
val_to_str(value, vals_wv_csp_12_element_value_tokens,
"<Unknown WV-CSP 1.2 Common Value token 0x%X>"));
return str;
}
#define wbxml_wv_csp_12_global wbxml_wv_csp_11_global
static const value_valuestring wbxml_wv_csp_12_tags[] = {
{ 0, wbxml_wv_csp_12_tags_cp0 },
{ 1, wbxml_wv_csp_12_tags_cp1 },
{ 2, wbxml_wv_csp_12_tags_cp2 },
{ 3, wbxml_wv_csp_12_tags_cp3 },
{ 4, wbxml_wv_csp_12_tags_cp4 },
{ 5, wbxml_wv_csp_12_tags_cp5 },
{ 6, wbxml_wv_csp_12_tags_cp6 },
{ 7, wbxml_wv_csp_12_tags_cp7 },
{ 8, wbxml_wv_csp_12_tags_cp8 },
{ 9, wbxml_wv_csp_12_tags_cp9 },
{ 10, wbxml_wv_csp_12_tags_cp10 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_12_attrStart[] = {
{ 0, wbxml_wv_csp_12_attrStart_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_wv_cspc_12 = {
"Wireless-Village Client-Server Protocol 1.2",
"WV-CSP 1.2",
{ ext_t_0_wv_cspc_12, NULL, NULL },
wv_csp12_opaque_binary_tag,
wv_csp12_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wv_csp_12_global,
wbxml_wv_csp_12_tags,
wbxml_wv_csp_12_attrStart,
NULL
};
/* WV-CSP 1.3
*
* Wireless Village Client Server Protocol
***************************************/
/***** Global extension tokens *****/
/* Same as WV-CSP 1.1 */
/***** Tag tokens *****/
/* Common code page */
static const value_string wbxml_wv_csp_13_tags_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Acceptance"},
{ 0x06, "AddList" },
{ 0x07, "AddNickList"},
{ 0x09, "WV-CSP-Message"},
{ 0x0A, "ClientID"},
{ 0x0B, "Code"},
{ 0x0C, "ContactList"},
{ 0x0D, "ContentData"},
{ 0x0E, "ContentEncoding"},
{ 0x0F, "ContentSize" },
{ 0x10, "ContentType"},
{ 0x11, "DateTime" },
{ 0x12, "Description" },
{ 0x13, "DetailedResult"},
{ 0x14, "EntityList"},
{ 0x15, "Group" },
{ 0x16, "GroupID"},
{ 0x17, "GroupList"},
{ 0x19, "Logo"},
{ 0x1A, "MessageCount" },
{ 0x1B, "MessageID" },
{ 0x1C, "MessageURI"},
{ 0x1D, "MSISDN" },
{ 0x1E, "Name"},
{ 0x1F, "NickList"},
{ 0x20, "NickName"},
{ 0x21, "Poll"},
{ 0x22, "Presence"},
{ 0x23, "PresenceSubList" },
{ 0x24, "PresenceValue"},
{ 0x25, "Property" },
{ 0x26, "Qualifier" },
{ 0x27, "Recipient" },
{ 0x28, "RemoveList"},
{ 0x29, "RemoveNickList" },
{ 0x2A, "Result" },
{ 0x2B, "ScreenName"},
{ 0x2C, "Sender" },
{ 0x2D, "Session"},
{ 0x2E, "SessionDescriptor" },
{ 0x2F, "SessionID"},
{ 0x30, "SessionType" },
{ 0x08, "SName" },
{ 0x31, "Status"},
{ 0x32, "Transaction" },
{ 0x33, "TransactionContent" },
{ 0x34, "TransactionDescriptor"},
{ 0x35, "TransactionID"},
{ 0x36, "TransactionMode" },
{ 0x37, "URL" },
{ 0x38, "URLList"},
{ 0x39, "User"},
{ 0x3A, "UserID" },
{ 0x3B, "UserList" },
{ 0x3C, "Validity" },
{ 0x3D, "Value" },
{ 0x00, NULL }
};
/* Note that the table continues in code page 0x09 */
/* Access code page (0x01) */
static const value_string wbxml_wv_csp_13_tags_cp1[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AllFunctions" },
{ 0x06, "AllFunctionsRequest" },
{ 0x07, "CancelInvite-Request" },
{ 0x08, "CancelInviteUser-Request" },
/* { 0x09, "Capability" }, - removed in WV 1.3*/
{ 0x0A, "CapabilityList" },
{ 0x0B, "CapabilityRequest" },
{ 0x0C, "ClientCapability-Request" },
{ 0x0D, "ClientCapability-Response" },
{ 0x0E, "DigestBytes" },
{ 0x0F, "DigestSchema" },
{ 0x10, "Disconnect" },
{ 0x11, "Functions" },
{ 0x12, "GetSPInfo-Request" },
{ 0x13, "GetSPInfo-Response" },
{ 0x14, "InviteID" },
{ 0x15, "InviteNote" },
{ 0x16, "Invite-Request" },
{ 0x17, "Invite-Response" },
{ 0x18, "InviteType" },
{ 0x19, "InviteUser-Request" },
{ 0x1A, "InviteUser-Response" },
{ 0x1B, "KeepAlive-Request" },
{ 0x1C, "KeepAliveTime" },
{ 0x1D, "Login-Request" },
{ 0x1E, "Login-Response" },
{ 0x1F, "Logout-Request" },
{ 0x20, "Nonce" },
{ 0x21, "Password" },
{ 0x22, "Polling-Request" },
{ 0x23, "ResponseNote" },
{ 0x24, "SearchElement" },
{ 0x25, "SearchFindings" },
{ 0x26, "SearchID" },
{ 0x27, "SearchIndex" },
{ 0x28, "SearchLimit" },
{ 0x29, "KeepAlive-Response" },
{ 0x2A, "SearchPairList" },
{ 0x2B, "Search-Request" },
{ 0x2C, "Search-Response" },
{ 0x2D, "SearchResult" },
{ 0x2E, "Service-Request" },
{ 0x2F, "Service-Response" },
{ 0x30, "SessionCookie" },
{ 0x31, "StopSearch-Request" },
{ 0x32, "TimeToLive" },
/* New in WV-CSP 1.1 */
{ 0x33, "SearchString" },
{ 0x34, "CompletionFlag" },
/* New in WV-CSP 1.2 */
{ 0x36, "ReceiveList" },
{ 0x37, "VerifyID-Request" },
{ 0x38, "Extended-Request" },
{ 0x39, "Extended-Response" },
{ 0x3A, "AgreedCapabilityList" },
{ 0x3B, "ExtendedData" },
{ 0x3C, "OtherServer" },
{ 0x3D, "PresenceAttributeNSName" },
{ 0x3E, "SessionNSName" },
{ 0x3F, "TransactionNSName" },
{ 0x00, NULL }
};
/* Note that the table continues in code page 0x0A */
/* Service code page (0x02) */
static const value_string wbxml_wv_csp_13_tags_cp2[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "ADDGM" },
/* { 0x06, "AttListFunc" }, removed in WV 1.3 */
{ 0x07, "BLENT" },
/* { 0x08, "CAAUT" }, removed in WV 1.3 */
{ 0x09, "CAINV" },
/* { 0x0A, "CALI" }, removed in WV 1.3 */
{ 0x0B, "CCLI" },
{ 0x0C, "ContListFunc" },
{ 0x0D, "CREAG" },
{ 0x0E, "DALI" },
{ 0x0F, "DCLI" },
{ 0x10, "DELGR" },
{ 0x11, "FundamentalFeat" },
{ 0x12, "FWMSG" },
/* { 0x13, "GALS" }, removed in WV 1.3 */
{ 0x14, "GCLI" },
{ 0x15, "GETGM" },
{ 0x16, "GETGP" },
{ 0x17, "GETLM" },
{ 0x18, "GETM" },
{ 0x19, "GETPR" },
{ 0x1A, "GETSPI" },
{ 0x1B, "GETWL" },
{ 0x1C, "GLBLU" },
{ 0x1D, "GRCHN" },
{ 0x1E, "GroupAuthFunc" },
{ 0x1F, "GroupFeat" },
{ 0x20, "GroupMgmtFunc" },
{ 0x21, "GroupUseFunc" },
{ 0x22, "IMAuthFunc" },
{ 0x23, "IMFeat" },
{ 0x24, "IMReceiveFunc" },
{ 0x25, "IMSendFunc" },
{ 0x26, "INVIT" },
{ 0x27, "InviteFunc" },
{ 0x28, "MBRAC" },
{ 0x29, "MCLS" },
{ 0x2A, "MDELIV" },
{ 0x2B, "NEWM" },
{ 0x2C, "NOTIF" },
{ 0x2D, "PresenceAuthFunc" },
{ 0x2E, "PresenceDeliverFunc"},
{ 0x2F, "PresenceFeat" },
/* { 0x30, "REACT" }, removed in WV 1.3 */
{ 0x31, "REJCM" },
{ 0x32, "REJEC" },
{ 0x33, "RMVGM" },
{ 0x34, "SearchFunc" },
{ 0x35, "ServiceFunc" },
{ 0x36, "SETD" },
{ 0x37, "SETGP" },
{ 0x38, "SRCH" },
{ 0x39, "STSRC" },
{ 0x3A, "SUBGCN" },
{ 0x3B, "UPDPR" },
{ 0x3C, "WVCSPFeat" },
/* New in WV-CSP 1.2 */
{ 0x3D, "MF" },
{ 0x3E, "MG" },
{ 0x3F, "MM" },
{ 0x00, NULL }
};
/* Note that the table continues in code page 0x08 */
/* Client capability code page (0x03) */
static const value_string wbxml_wv_csp_13_tags_cp3[] = {
/* 0x00 -- 0x04 GLOBAL */
/* {0x05, "AcceptedCharset"}, - removed in WV 1.3 */
/* { 0x06, "AcceptedContentLength"}, - removed in WV 1.3 */
{ 0x07, "AcceptedContentType"},
{ 0x08, "AcceptedTransferEncoding"},
{ 0x09, "AnyContent"},
{ 0x0A, "DefaultLanguage"},
{ 0x0B, "InitialDeliveryMethod"},
{ 0x0C, "MultiTrans"},
{ 0x0D, "ParserSize"},
{ 0x0E, "ServerPollMin"},
{ 0x0F, "SupportedBearer"},
{ 0x10, "SupportedCIRMethod"},
{ 0x11, "TCPAddress"},
{ 0x12, "TCPPort"},
{ 0x13, "UDPPort"},
/* New in WV-CSP 1.3 */
{ 0x14, "CIRHTTPAddress"},
{ 0x15, "UDPAddress"},
{ 0x16, "AcceptedPullLength"},
{ 0x17, "AcceptedPushLength"},
{ 0x18, "AcceptedRichContentLength"},
{ 0x19, "AcceptedTextContentLength"},
{ 0x1A, "OfflineETEMHandling"},
{ 0x1B, "PlainTextCharset"},
{ 0x1C, "SessionPriority"},
{ 0x1D, "SupportedOfflineBearer"},
{ 0x1F, "UserSessionLimit"},
{ 0x20, "CIRSMSAddress"},
{ 0x21, "MultiTransPerMessage"},
{ 0x22, "OnlineETEMHandling"},
{ 0x23,"ContentPolicy"},
{ 0x24, "ContentPolicyLimit"},
{ 0x00, NULL }
};
/* Presence primitive code page (0x04) */
static const value_string wbxml_wv_csp_13_tags_cp4[] = {
/* 0x00 -- 0x04 GLOBAL */
/* { 0x05, "CancelAuth-Request" }, - removed in WV 1.3 */
{ 0x06, "ContactListProperties" },
{ 0x07, "CreateAttributeList-Request" },
{ 0x08, "CreateList-Request" },
{ 0x09, "DefaultAttributeList" },
{ 0x0A, "DefaultContactList" },
{ 0x0B, "DefaultList" },
{ 0x0C, "DeleteAttributeList-Request" },
{ 0x0D, "DeleteList-Request" },
{ 0x0E, "GetAttributeList-Request" },
{ 0x0F, "GetAttributeList-Response" },
{ 0x10, "GetList-Request" },
{ 0x11, "GetList-Response" },
{ 0x12, "GetPresence-Request" },
{ 0x13, "GetPresence-Response" },
{ 0x14, "GetWatcherList-Request" },
{ 0x15, "GetWatcherList-Response" },
{ 0x16, "ListManage-Request" },
{ 0x17, "ListManage-Response" },
{ 0x18, "UnsubscribePresence-Request" },
{ 0x19, "PresenceAuth-Request" },
{ 0x1A, "PresenceAuth-User" },
{ 0x1B, "PresenceNotification-Request" },
{ 0x1C, "UpdatePresence-Request" },
{ 0x1D, "SubscribePresence-Request" },
/* New in WV-CSP 1.2 */
/* { 0x1E, "Auto-Subscribe" }, - removed in WV 1.3 */
/* { 0x1F, "GetReactiveAuthStatus-Request" }, */
/* { 0x20, "GetReactiveAuthStatus-Response" }, */
/* New in WV-CSP 1.3 */
{ 0x21, "CreateList-Response"},
{ 0x00, NULL }
};
/* Presence attribute code page (0x05) */
static const value_string wbxml_wv_csp_13_tags_cp5[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "Accuracy" },
{ 0x06, "Address" },
{ 0x07, "AddrPref" },
{ 0x08, "Alias" },
{ 0x09, "Altitude" },
{ 0x0A, "Building" },
{ 0x0B, "Caddr" },
{ 0x0C, "City" },
{ 0x0D, "ClientInfo" },
{ 0x0E, "ClientProducer" },
{ 0x0F, "ClientType" },
{ 0x10, "ClientVersion" },
{ 0x11, "CommC" },
{ 0x12, "CommCap" },
{ 0x13, "ContactInfo" },
{ 0x14, "ContainedvCard" },
{ 0x15, "Country" },
{ 0x16, "Crossing1" },
{ 0x17, "Crossing2" },
{ 0x18, "DevManufacturer" },
{ 0x19, "DirectContent" },
{ 0x1A, "FreeTextLocation" },
{ 0x1B, "GeoLocation" },
{ 0x1C, "Language" },
{ 0x1D, "Latitude" },
{ 0x1E, "Longitude" },
{ 0x1F, "Model" },
{ 0x20, "NamedArea" },
{ 0x21, "OnlineStatus" },
{ 0x22, "PLMN" },
{ 0x23, "PrefC" },
{ 0x24, "PreferredContacts" },
{ 0x25, "PreferredLanguage" },
{ 0x26, "ReferredContent" },
{ 0x27, "ReferredvCard" },
{ 0x28, "Registration" },
{ 0x29, "StatusContent" },
{ 0x2A, "StatusMood" },
{ 0x2B, "StatusText" },
{ 0x2C, "Street" },
{ 0x2D, "TimeZone" },
{ 0x2E, "UserAvailability" },
/* New in WV-CSP 1.1 */
{ 0x2F, "Cap" },
{ 0x30, "Cname" },
{ 0x31, "Contact" },
{ 0x32, "Cpriority" },
{ 0x33, "Cstatus" },
{ 0x34, "Note" },
{ 0x35, "Zone" },
/* New in WV-CSP 1.2 */
{ 0x36, "ContentType" },
{ 0x37, "Inf_link" },
{ 0x38, "InfoLink" },
{ 0x39, "Link" },
{ 0x3A, "Text" },
/* New in WV-CSP 1.3 */
{ 0x3B, "ClientContentLimit"},
{ 0x3C, "ClientIMPriority"},
{ 0x3D, "MaxPullLength"},
{ 0x3E, "MaxPushLength"},
{ 0x00, NULL }
};
/* Messaging code page (0x06) */
static const value_string wbxml_wv_csp_13_tags_cp6[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "BlockList" },
{ 0x06, "BlockEntity-Request" }, /* Was: BlockUser-Request */
{ 0x07, "DeliveryMethod" },
{ 0x08, "DeliveryReport" },
{ 0x09, "DeliveryReport-Request" },
{ 0x0A, "ForwardMessage-Request" },
{ 0x0B, "GetBlockedList-Request" },
{ 0x0C, "GetBlockedList-Response" },
{ 0x0D, "GetMessageList-Request" },
{ 0x0E, "GetMessageList-Response" },
{ 0x0F, "GetMessage-Request" },
{ 0x10, "GetMessage-Response" },
{ 0x11, "GrantList" },
{ 0x12, "MessageDelivered" },
{ 0x13, "MessageInfo" },
{ 0x14, "MessageNotification" },
{ 0x15, "NewMessage" },
{ 0x16, "RejectMessage-Request" },
{ 0x17, "SendMessage-Request" },
{ 0x18, "SendMessage-Response" },
{ 0x19, "SetDeliveryMethod-Request" },
{ 0x1A, "DeliveryTime" },
/* New in WV-CSP 1.3 */
{ 0x20, "MessageInfoList"},
{ 0x21, "ForwardMessage-Response"},
{ 0x00, NULL }
};
/* Group code page (0x07) */
static const value_string wbxml_wv_csp_13_tags_cp7[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "AddGroupMembers-Request" },
{ 0x06, "Admin" },
{ 0x07, "CreateGroup-Request" },
{ 0x08, "DeleteGroup-Request" },
{ 0x09, "GetGroupMembers-Request" },
{ 0x0A, "GetGroupMembers-Response" },
{ 0x0B, "GetGroupProps-Request" },
{ 0x0C, "GetGroupProps-Response" },
{ 0x0D, "GroupChangeNotice" },
{ 0x0E, "GroupProperties" },
{ 0x0F, "Joined" },
{ 0x10, "JoinedRequest" },
{ 0x11, "JoinGroup-Request" },
{ 0x12, "JoinGroup-Response" },
{ 0x13, "LeaveGroup-Request" },
{ 0x14, "LeaveGroup-Response" },
{ 0x15, "Left" },
{ 0x16, "MemberAccess-Request" },
{ 0x17, "Mod" },
{ 0x18, "OwnProperties" },
{ 0x19, "RejectList-Request" },
{ 0x1A, "RejectList-Response" },
{ 0x1B, "RemoveGroupMembers-Request" },
{ 0x1C, "SetGroupProps-Request" },
{ 0x1D, "SubscribeGroupNotice-Request" },
{ 0x1E, "SubscribeGroupNotice-Response" },
/* { 0x1F, "Users" }, - removed in WV 1.3 */
{ 0x20, "WelcomeNote" },
/* New in WV-CSP 1.1 */
{ 0x21, "JoinGroup" },
{ 0x22, "SubscribeNotification" },
{ 0x23, "SubscribeType" },
/* New in WV-CSP 1.2 */
{ 0x24, "GetJoinedUsers-Request" },
{ 0x25, "GetJoinedUsers-Response" },
{ 0x26, "AdminMapList" },
{ 0x27, "AdminMapping" },
{ 0x28, "Mapping" },
{ 0x29, "ModMapping" },
{ 0x2A, "UserMapList" },
{ 0x2B, "UserMapping" },
/* New in WV-CSP 1.3 */
{ 0x2C, "JoinedBlocked" },
{ 0x2D, "LeftBlocked" },
{ 0x00, NULL }
};
/* Service negotiation code page - continued (0x08) */
static const value_string wbxml_wv_csp_13_tags_cp8[] = {
/* 0x00 -- 0x04 GLOBAL */
/* New in WV-CSP 1.2 */
{ 0x05, "MP" },
{ 0x06, "GETAUT" },
{ 0x07, "GETJU" },
{ 0x08, "VRID" },
{ 0x09, "VerifyIDFunc" },
/* New in WV-CSP 1.3 */
{ 0x0A, "GETMAP" },
{ 0x0B, "SGMNT" },
{ 0x0C, "EXCON" },
{ 0x0D, "OFFNOTIF" },
{ 0x0E, "ADVSR" },
{ 0x00, NULL }
};
/* Common code page - continued (0x09) */
static const value_string wbxml_wv_csp_13_tags_cp9[] = {
/* 0x00 -- 0x04 GLOBAL */
/* New in WV-CSP 1.2 */
{ 0x05, "CIR" },
{ 0x06, "Domain" },
{ 0x07, "ExtBlock" },
{ 0x08, "HistoryPeriod" },
{ 0x09, "IDList" },
{ 0x0A, "MaxWatcherList" },
/* { 0x0B, "ReactiveAuthState" }, - removed in WV 1.3 */
/* { 0x0C, "ReactiveAuthStatus" }, - removed in WV 1.3 */
/* { 0x0D, "ReactiveAuthStatusList" }, - removed in WV 1.3 */
{ 0x0E, "Watcher" },
{ 0x0F, "WatcherStatus" },
/* New in WV-CSP 1.3 */
{ 0x1B, "AnswerOption"},
{ 0x1C, "AnswerOptionID" },
{ 0x1D, "AnswerOptions"},
{ 0x0B, "AnswerOptionText"},
{ 0x1E, "ApplicationID"},
{ 0x1F, "AuthorizeAndGrant"},
{ 0x20, "ChosenOptionID"},
{ 0x19, "ClearPublicProfile"},
{ 0x13, "Color"},
{ 0x21, "ContactListNotify"},
{ 0x14, "ContentName"},
{ 0x22, "DefaultNotify"},
{ 0x39, "ExtBlockETEM"},
{ 0x36, "ExtendConversationID"},
{ 0x23, "ExtendConversationUser"},
{ 0x10, "Font"},
{ 0x18, "FriendlyName"},
{ 0x34 , "GetMap-Request"},
{ 0x35, "GetMap-Response"},
{ 0x3A, "GroupContentLimit" },
{ 0x24, "InText"},
{ 0x15, "Map"},
{ 0x3B, "MessageTotalCount"},
{ 0x16, "NotificationType"},
{ 0x17, "NotificationTypeList"},
{ 0x1A, "PublicProfile"},
{ 0x38, "RequiresResponse"},
{ 0x25, "SegmentCount"},
{ 0x26, "SegmentID" },
{ 0x27, "SegmentInfo"},
{ 0x28, "SegmentReference"},
{ 0x11, "Size"},
{ 0x12, "Style" },
{ 0x29, "SystemMessage"},
{ 0x2A, "SystemMessageID"},
{ 0x2B, "SystemMessageList"},
{ 0x2C, "SystemMessageResponse"},
{ 0x2D, "SystemMessageResponseList" },
{ 0x2F, "SystemMessageText"},
{ 0x30, "TryAgainTimeout"},
{ 0x3C, "UnrecognizedUserID"},
{ 0x3F , "UserIDList"},
{ 0x3D, "UserIDPair"},
{ 0x31, "UserNotify"},
{ 0x3E, "ValidUserID"},
{ 0x32, "VerificationKey"},
{ 0x33, "VerificationMechanism"},
{ 0x37, "WatcherCount"},
{ 0x00, NULL }
};
/* Access code page - continued (0x0A) */
static const value_string wbxml_wv_csp_13_tags_cp10[] = {
/* 0x00 -- 0x04 GLOBAL */
/* New in WV-CSP 1.2 */
{ 0x05, "WV-CSP-NSDiscovery-Request" },
{ 0x06, "WV-CSP-NSDiscovery-Response" },
{ 0x07, "VersionList"},
/* New in WV-CSP 1.3 */
{ 0x08, "SubscribeNotification-Request" },
{ 0x09, "UnsubscribeNotification-Request" },
{ 0x0A, "Notification-Request" },
{ 0x0B, "AdvancedCriteria" },
{ 0x0C, "PairID" },
{ 0x0D, "GetPublicProfile-Request" },
{ 0x0E, "GetPublicProfile-Response" },
{ 0x0F, "UpdatePublicProfile-Request" },
{ 0x10, "DropSegment-Request" },
{ 0x11, "ExtendConversation-Response" },
{ 0x12, "ExtendConversation-Request" },
{ 0x13, "GetSegment-Request" },
{ 0x14, "GetSegment-Response" },
{ 0x15, "SystemMessage-Request" },
{ 0x16, "SystemMessage-User" },
{ 0x17, "SearchPair" },
{ 0x18, "SegmentContent" },
{ 0x00, NULL }
};
/* Common code page - continued (0x0B) */
static const value_string wbxml_wv_csp_13_tags_cp11[] = {
/* 0x00 -- 0x04 GLOBAL */
/* New in WV-CSP 1.3 */
{ 0x05, "GrantListInUse" },
{ 0x06, "BlockListInUse" },
{ 0x07, "ContactListIDList" },
{ 0x08, "AnswerOptionsText" },
{ 0x00, NULL }
};
/***** Attribute Start tokens *****/
/* Common code page (0x00) */
static const value_string wbxml_wv_csp_13_attrStart_cp0[] = {
/* 0x00 -- 0x04 GLOBAL */
{ 0x05, "xmlns='http://www.wireless-village.org/CSP'" },
{ 0x06, "xmlns='http://www.wireless-village.org/PA'" },
{ 0x07, "xmlns='http://www.wireless-village.org/TRC'" },
/* New in WV-CSP 1.2 */
{ 0x08, "xmlns='http://www.openmobilealliance.org/DTD/WV-CSP'" },
{ 0x09, "xmlns='http://www.openmobilealliance.org/DTD/WV-PA'" },
{ 0x0A, "xmlns='http://www.openmobilealliance.org/DTD/WV-TRC'" },
/* New in WV-CSP 1.3 */
{ 0x0B, "xmlns='http://www.openmobilealliance.org/DTD/IMPS-CSP'" },
{ 0x0C, "xmlns='http://www.openmobilealliance.org/DTD/IMPS-PA'" },
{ 0x0D, "xmlns='http://www.openmobilealliance.org/DTD/IMPS-TRC'" },
{ 0x00, NULL }
};
/***** Attribute Value tokens *****/
/*
* Element value tokens
*/
static const value_string vals_wv_csp_13_element_value_tokens[] = {
/*
* Common value tokens
*/
{ 0x52, "AC" },
{ 0x00, "AccessType" },
{ 0x01, "ActiveUsers" },
{ 0x02, "Admin" },
{ 0x3C, "ANC" },
{ 0x51, "AND" },
{ 0x5A, "ANU" },
{ 0x68, "AP" },
{ 0x03, "application/" },
{ 0x04, "application/vnd.wap.mms-message" },
{ 0x05, "application/x-sms" },
{ 0x8F, "Aqua" },
{ 0x90, "ATCL" },
{ 0x31, "AutoDelete" },
{ 0x06, "AutoJoin" },
{ 0x07, "BASE64" },
{ 0x7B, "Big" },
{ 0x80, "Black" },
{ 0x53, "BLC" },
{ 0x54, "BLUC" },
{ 0x8D, "Blue" },
{ 0x7D, "Bold" },
{ 0xBC, "C" },
{ 0x91, "CLC" },
{ 0x55, "CLCR" },
{ 0x56, "CLD" },
{ 0x08, "Closed" },
{ 0xBD, "CURRENT_SUBSCRIBER" },
{ 0x09, "Default" },
{ 0x34, "DENIED" },
{ 0xAB, "DETECT" },
{ 0x0A, "DisplayName" },
{ 0xA6, "DoNotNotify" },
{ 0xA0, "EC" },
{ 0xBA, "EG" },
{ 0x0B, "F" },
{ 0xAC, "FORKALL" },
{ 0xBE, "FORMER_SUBSCRIBER" },
{ 0x87, "Fuchsia" },
{ 0x0C, "G" },
{ 0x57, "GC" },
{ 0x58, "GD" },
{ 0x59, "GLC" },
{ 0xA1, "GLUC" },
{ 0x32, "GM" },
{ 0xA7, "GMAU" },
{ 0xA8, "GMG" },
{ 0xA9, "GMR" },
{ 0xAA, "GMU" },
{ 0x0D, "GR" },
{ 0x35, "GRANTED" },
{ 0x82, "Gray" },
{ 0x88, "Green" },
{ 0x3D, "History" },
{ 0x0E, "http://" },
{ 0x0F, "https://" },
{ 0x7C, "Huge" },
{ 0xA2, "IA" },
{ 0xA3, "IC" },
{ 0x10, "image/" },
{ 0x11, "Inband" },
{ 0x12, "IM" },
{ 0x9F, "IR" },
{ 0x7E, "Italic" },
{ 0x89, "Lime" },
{ 0x84, "Maroon" },
{ 0x13, "MaxActiveUsers" },
{ 0x7A, "Medium" },
{ 0xBB, "MinimumAge" },
{ 0x14, "Mod" },
{ 0x15, "Name" },
{ 0x8C, "Navy" },
{ 0x16, "None" },
{ 0x17, "N" },
{ 0xAD, "OEU" },
{ 0x8A, "Olive" },
{ 0x18, "Open" },
{ 0x19, "Outband" },
{ 0x36, "PENDING" },
{ 0x3A, "PPU" },
{ 0x1A, "PR" },
{ 0xBF, "PRESENCE_ACCESS" },
{ 0x1B, "Private" },
{ 0x1C, "PrivateMessaging" },
{ 0x1D, "PrivilegeLevel" },
{ 0x1E, "Public" },
{ 0x86, "Purple" },
{ 0x1F, "P" },
{ 0xC0, "R" },
{ 0x85, "Red" },
{ 0x20, "Request" },
{ 0x21, "Response" },
{ 0x22, "Restricted" },
{ 0x38, "RequireInvitation" },
{ 0x23, "ScreenName" },
{ 0x24, "Searchable" },
{ 0x25, "S" },
{ 0x26, "SC" },
{ 0xAE, "SERVERLOGIC" },
{ 0x37, "ShowID" },
{ 0x81, "Silver" },
{ 0x79, "Small" },
{ 0x3B, "SPA" },
{ 0x8E, "Teal" },
{ 0x27, "text/" },
{ 0x28, "text/plain" },
{ 0x29, "text/x-vCalendar" },
{ 0x2A, "text/x-vCard" },
{ 0x39, "Tiny" },
{ 0x2B, "Topic" },
{ 0x2C, "T" },
{ 0x2D, "Type" },
{ 0x2E, "U" },
{ 0x7F, "Underline" },
{ 0x2F, "US" },
{ 0x33, "Validity" },
{ 0x83, "White" },
{ 0x78, "www.openmobilealliance.org" },
{ 0x30, "www.wireless-village.org" },
{ 0x8B, "Yellow" },
/*
* Access value tokens
*/
{ 0x3D, "GROUP_ID" },
{ 0x3E, "GROUP_NAME" },
{ 0x3F, "GROUP_TOPIC" },
{ 0x40, "GROUP_USER_ID_JOINED" },
{ 0x41, "GROUP_USER_ID_OWNER" },
{ 0x42, "HTTP" },
{ 0x43, "SMS" },
{ 0x44, "STCP" },
{ 0x45, "SUDP" },
{ 0x46, "USER_ALIAS" },
{ 0x47, "USER_EMAIL_ADDRESS" },
{ 0x48, "USER_FIRST_NAME" },
{ 0x49, "USER_ID" },
{ 0x4A, "USER_LAST_NAME" },
{ 0x4B, "USER_MOBILE_NUMBER" },
{ 0x4C, "USER_ONLINE_STATUS" },
{ 0x4D, "WAPSMS" },
{ 0x4E, "WAPUDP" },
{ 0x4F, "WSP" },
{ 0x50, "GROUP_USER_ID_AUTOJOIN" },
/*
* Presence value tokens
*/
{ 0x5B, "ANGRY" },
{ 0x5C, "ANXIOUS" },
{ 0x5D, "ASHAMED" },
{ 0x5F, "AVAILABLE" },
{ 0x60, "BORED" },
{ 0x61, "CALL" },
{ 0x62, "CLI" },
{ 0x63, "COMPUTER" },
{ 0x64, "DISCREET" },
{ 0x65, "EMAIL" },
{ 0x66, "EXCITED" },
{ 0x67, "HAPPY" },
{ 0x6B, "IN_LOVE" },
{ 0x6C, "INVINCIBLE" },
{ 0x6D, "JEALOUS" },
{ 0x6E, "MMS" },
{ 0x6F, "MOBILE_PHONE" },
{ 0x70, "NOT_AVAILABLE" },
{ 0x71, "OTHER" },
{ 0x72, "PDA" },
{ 0x73, "SAD" },
{ 0x74, "SLEEPY" },
{ 0x75, "SMS" },
/*
* Access value tokens - continued
*/
{ 0x93, "USER_CITY" },
{ 0x94, "USER_COUNTRY" },
{ 0x95, "USER_FRIENDLY_NAME" },
{ 0x96, "USER_GENDER" },
{ 0x97, "USER_INTENTION" },
{ 0x98, "USER_INTERESTS_HOBBIES" },
{ 0x99, "USER_MARITAL_STATUS" },
{ 0x9A, "PRIORITYREJECT" },
{ 0x9B, "PRIORITYSTORE" },
{ 0x9C, "REJECT" },
{ 0x9D, "SENDREJECT" },
{ 0x9E, "SENDSTORE" },
{ 0xA4, "SSMS" },
{ 0xA5, "SHTTP" },
{ 0xAF, "PP_AGE" },
{ 0xB0, "PP_CITY" },
{ 0xB1, "PP_COUNTRY" },
{ 0xB2, "PP_FRIENDLY_NAME" },
{ 0xB3, "PP_FREE_TEXT" },
{ 0xB4, "PP_GENDER" },
{ 0xB5, "PP_INTENTION" },
{ 0xB6, "PP_INTERESTS" },
{ 0xB7, "PP_MARITAL_STATUS" },
{ 0xB8, "USER_AGE_MAX" },
{ 0xB9, "USER_AGE_MIN" },
{ 0x00, NULL }
};
/***** Token code page aggregation *****/
static char *
ext_t_0_wv_cspc_13(tvbuff_t *tvb _U_, guint32 value, guint32 str_tbl _U_)
{
char *str = wmem_strdup_printf(wmem_packet_scope(), "Common Value: '%s'",
val_to_str(value, vals_wv_csp_13_element_value_tokens,
"<Unknown WV-CSP 1.3 Common Value token 0x%X>"));
return str;
}
#define wbxml_wv_csp_13_global wbxml_wv_csp_12_global /* TODO */
static const value_valuestring wbxml_wv_csp_13_tags[] = {
{ 0, wbxml_wv_csp_13_tags_cp0 },
{ 1, wbxml_wv_csp_13_tags_cp1 },
{ 2, wbxml_wv_csp_13_tags_cp2 },
{ 3, wbxml_wv_csp_13_tags_cp3 },
{ 4, wbxml_wv_csp_13_tags_cp4 },
{ 5, wbxml_wv_csp_13_tags_cp5 },
{ 6, wbxml_wv_csp_13_tags_cp6 },
{ 7, wbxml_wv_csp_13_tags_cp7 },
{ 8, wbxml_wv_csp_13_tags_cp8 },
{ 9, wbxml_wv_csp_13_tags_cp9 },
{ 10, wbxml_wv_csp_13_tags_cp10 },
{ 11, wbxml_wv_csp_13_tags_cp11 },
{ 0, NULL }
};
static const value_valuestring wbxml_wv_csp_13_attrStart[] = {
{ 0, wbxml_wv_csp_13_attrStart_cp0 },
{ 0, NULL }
};
static const wbxml_decoding decode_wv_cspc_13 = {
"Wireless-Village Client-Server Protocol 1.3",
"WV-CSP 1.3",
{ ext_t_0_wv_cspc_13, NULL, NULL },
wv_csp13_opaque_binary_tag,
wv_csp13_opaque_literal_tag,
default_opaque_binary_attr,
default_opaque_literal_attr,
wbxml_wv_csp_13_global,
wbxml_wv_csp_13_tags,
wbxml_wv_csp_13_attrStart,
NULL
};
/****************************** Discriminators ******************************/
/* Discriminator for WV-CSP; allows version detection based on parsing parts
* of the start of the WBXML body.
*/
static const wbxml_decoding *
wv_csp_discriminator(tvbuff_t *tvb, guint32 offset)
{
guint32 magic_1 = tvb_get_ntohl(tvb, offset + 0);
guint16 magic_2 = tvb_get_ntohs(tvb, offset + 4);
if (magic_1 == 0xFE050331 && magic_2 == 0x2e30)
{
/* FE 05 03 31 2E 30 --> WV-CSP 1.0 */
return &decode_wv_cspc_10;
}
else if (magic_1 == 0xC9050331 && magic_2 == 0x2e31)
{
/* C9 05 03 31 2E 31 --> WV-CSP 1.1 */
return &decode_wv_cspc_11;
}
else if (magic_1 == 0xC9080331 && magic_2 == 0x2e32)
{
/* C9 08 03 31 2E 32 --> WV-CSP 1.2 */
return &decode_wv_cspc_12;
}
else if ( magic_1 == 0xC90B0331 && magic_2 == 0x2E33)
{
/* C9 0B 03 31 2E 33 --> WV-CSP 1.3 */
return &decode_wv_cspc_13;
}
/* Default: WV-CSP 1.2 */
return &decode_wv_cspc_12;
}
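/* Worked example for the WV-CSP 1.2 magic above, reading the bytes as WBXML
* body tokens: C9 = known tag 0x09 ("WV-CSP-Message") with the attribute
* (0x80) and content (0x40) bits set, 08 = attrStart token 0x08
* (xmlns='http://www.openmobilealliance.org/DTD/WV-CSP'), and 03 31 2E 32 is
* the start of STR_I "1.2" -- i.e. the document opens with
* <WV-CSP-Message xmlns="http://www.openmobilealliance.org/DTD/WV-CSP1.2">.
*/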
/********************** WBXML token mapping aggregation **********************/
static const wbxml_decoding *get_wbxml_decoding_from_public_id (guint32 publicid);
static const wbxml_decoding *get_wbxml_decoding_from_content_type (
const char *content_type, tvbuff_t *tvb, guint32 offset);
/**
** Aggregation of content type and aggregated code pages
** Content type map lookup stops at the first entry whose content_type is NULL
**/
/*
* The following map contains entries for registered WBXML public IDs.
* See WAP WINA or OMA OMNA for the registered values:
* http://www.openmobilealliance.org/tech/omna/ */
static const wbxml_integer_list well_known_public_id_list[] = {
/* 0x00 - Unknown or missing Public ID */
/* 0x01 - LITERAL PublicID - see String Table */
{ 0x02, &decode_wmlc_10 }, /* WML 1.0 */
/* 0x03 - WTA 1.0 */
{ 0x04, &decode_wmlc_11 }, /* WML 1.1 */
{ 0x05, &decode_sic_10 }, /* SI 1.0 */
{ 0x06, &decode_slc_10 }, /* SL 1.0 */
{ 0x07, &decode_coc_10 }, /* CO 1.0 */
{ 0x08, &decode_channelc_10 }, /* CHANNEL 1.0 */
{ 0x09, &decode_wmlc_12 }, /* WML 1.2 */
{ 0x0A, &decode_wmlc_13 }, /* WML 1.3 */
{ 0x0B, &decode_provc_10 }, /* PROV 1.0 */
/* 0x0C - WTA-WML 1.2 */
{ 0x0D, &decode_emnc_10 }, /* EMN 1.0 */
/* 0x0E - DRMREL 1.0 */
{ 0x0F, &decode_wv_cspc_10 }, /* WV-CSP 1.0 */
{ 0x10, &decode_wv_cspc_11 }, /* WV-CSP 1.1 */
/* See http://www.openmobilealliance.org/tech/omna/omna-wbxml-public-docid.htm */
{ 0x11, &decode_wv_cspc_12 }, /* OMA IMPS - CSP protocol DTD v1.2 */
{ 0x12, &decode_wv_cspc_13 }, /* OMA IMPS - CSP protocol DTD v1.3 */
{ 0x020B, &decode_nokiaprovc_70 },/* Nokia OTA Provisioning 7.0 */
{ 0x0FD1, &decode_syncmlc_10 }, /* SyncML 1.0 */
{ 0x0FD3, &decode_syncmlc_11 }, /* SyncML 1.1 */
/* Note: I assumed WML+ 1.x would not be that different from WML 1.x;
* the real mapping should come from Phone.com (OpenWave)! */
{ 0x1108, &decode_wmlc_11 }, /* Phone.com WMLC+ 1.1 - not 100% correct */
{ 0x110D, &decode_wmlc_13 }, /* Phone.com WMLC+ 1.3 - not 100% correct */
{ 0x1201, &decode_syncmlc_12 }, /* SyncML 1.2 */
{ 0x00, NULL }
};
/* The following map contains entries only registered with a literal media
* type. */
static const wbxml_literal_list content_type_list[] = {
{ "application/x-wap-prov.browser-settings",
NULL,
&decode_nokiaprovc_70
},
{ "application/x-wap-prov.browser-bookmarks",
NULL,
&decode_nokiaprovc_70
},
{ "application/vnd.wv.csp.wbxml",
wv_csp_discriminator,
&decode_wv_cspc_11
},
{ "application/vnd.ms-sync.wbxml",
NULL,
&decode_mssync_10
},
{ "application/vnd.ms-sync",
NULL,
&decode_mssync_10
},
{ NULL, NULL, NULL }
};
/* Returns a pointer to the WBXML token map for the given WBXML public
* identifier value (see WINA for a table with defined identifiers). */
static const wbxml_decoding *get_wbxml_decoding_from_public_id (guint32 public_id)
{
const wbxml_decoding *map = NULL;
DebugLog(("get_wbxml_decoding_from_public_id: public_id = %u\n",
public_id));
if (public_id >= 2) {
const wbxml_integer_list *item = well_known_public_id_list;
while (item && item->public_id && item->map) {
if (item->public_id == public_id) {
map = item->map;
break;
}
item++;
}
}
return map;
}
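/* E.g. get_wbxml_decoding_from_public_id(0x10) walks the well-known list
* above and returns &decode_wv_cspc_11 (WV-CSP 1.1); public IDs 0 (unknown)
* and 1 (literal, in the string table) are rejected by the public_id >= 2
* guard.
*/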
static const wbxml_decoding *get_wbxml_decoding_from_content_type (
const char *content_type, tvbuff_t *tvb, guint32 offset)
{
const wbxml_decoding *map = NULL;
DebugLog(("get_wbxml_decoding_from_content_type: content_type = [%s]\n",
content_type));
if (content_type && content_type[0]) {
const wbxml_literal_list *item = content_type_list;
while (item && item->content_type) {
if (g_ascii_strcasecmp(content_type, item->content_type) == 0) {
/* Try the discriminator */
if (item->discriminator != NULL) {
map = item->discriminator(tvb, offset);
}
if (map == NULL) {
map = item->map;
}
break;
}
item++;
}
}
return map;
}
/* WBXML content token mapping depends on the following parameters:
* - Content type (guint32)
* - Token type (global, tags, attrStart, attrValue)
* - Code page for tag and attribute
*
* This results in the following steps:
* 1. Retrieve content type mapping
* 2. If exists, retrieve token type mapping
* 3. If exists, retrieve required code page
* 4. If exists, retrieve token mapping
*/
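/* Example walk-through of the steps above, using the WV-CSP 1.2 tables in
* this file: decode_wv_cspc_12 (step 1) -> wbxml_wv_csp_12_tags (step 2) ->
* code page 5 = wbxml_wv_csp_12_tags_cp5 (step 3) -> token 0x05 = "Accuracy"
* (step 4); i.e. map_token(wbxml_wv_csp_12_tags, 5, 0x05) returns "Accuracy".
*/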
#define wbxml_UNDEFINED_TOKEN \
"(Requested token not defined for this content type)"
#define wbxml_UNDEFINED_TOKEN_CODE_PAGE \
"(Requested token code page not defined for this content type)"
#define wbxml_UNDEFINED_TOKEN_MAP \
"(Requested token map not defined for this content type)"
/* Return token mapping for a given content mapping entry. */
static const char *
map_token (const value_valuestring *token_map, guint8 codepage, guint8 token) {
const value_string *vs;
const char *s;
if (token_map) { /* Found map */
if ((vs = val_to_valstr (codepage, token_map))) {
/* Found codepage map */
s = try_val_to_str (token, vs);
if (s) { /* Found valid token */
DebugLog(("map_token(codepage = %u, token = %u: [%s]\n", codepage, token, s));
return s;
}
/* No valid token mapping in specified code page of token map */
DebugLog(("map_token(codepage = %u, token = %u: "
wbxml_UNDEFINED_TOKEN "\n", codepage, token));
return wbxml_UNDEFINED_TOKEN;
}
/* There is no token map entry for the requested code page */
DebugLog(("map_token(codepage = %u, token = %u: "
wbxml_UNDEFINED_TOKEN_CODE_PAGE "\n", codepage, token));
return wbxml_UNDEFINED_TOKEN_CODE_PAGE;
}
/* The token map does not exist */
DebugLog(("map_token(codepage = %u, token = %u: "
wbxml_UNDEFINED_TOKEN_MAP "\n", codepage, token));
return wbxml_UNDEFINED_TOKEN_MAP;
}
/************************** Function prototypes **************************/
static void
dissect_wbxml(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree);
static void
dissect_uaprof(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree);
static void
dissect_wbxml_common(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
const wbxml_decoding *override_content_map);
/* Parse and display the WBXML string table */
static void
show_wbxml_string_table (proto_tree *tree, tvbuff_t *tvb, guint32 str_tbl,
guint32 str_tbl_len);
/* Parse data while in STAG state */
static guint32
parse_wbxml_tag (proto_tree *tree, tvbuff_t *tvb, guint32 offset,
guint32 str_tbl, guint8 *level, guint8 *codepage_stag, guint8 *codepage_attr);
/* Parse data while in STAG state;
* interpret tokens as defined by content type */
static guint32
parse_wbxml_tag_defined (proto_tree *tree, tvbuff_t *tvb, guint32 offset,
guint32 str_tbl, guint8 *level, guint8 *codepage_stag, guint8 *codepage_attr,
const wbxml_decoding *map);
/* Parse data while in ATTR state */
static guint32
parse_wbxml_attribute_list (proto_tree *tree, tvbuff_t *tvb,
guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr);
/* Parse data while in ATTR state;
* interpret tokens as defined by content type */
static guint32
parse_wbxml_attribute_list_defined (proto_tree *tree, tvbuff_t *tvb,
guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr,
const wbxml_decoding *map);
/****************** WBXML protocol dissection functions ******************/
static void
dissect_wbxml(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
dissect_wbxml_common(tvb, pinfo, tree, NULL);
}
static void
dissect_uaprof(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree)
{
dissect_wbxml_common(tvb, pinfo, tree, &decode_uaprof_wap_248);
}
/* Code to actually dissect the packets */
static void
dissect_wbxml_common(tvbuff_t *tvb, packet_info *pinfo, proto_tree *tree,
const wbxml_decoding *override_content_map)
{
/* Set up structures needed to add the protocol subtree and manage it */
proto_item *ti;
proto_tree *wbxml_tree; /* Main WBXML tree */
proto_tree *wbxml_str_tbl_tree; /* String table subtree */
proto_tree *wbxml_content_tree; /* Content subtree */
guint8 version;
guint offset = 0;
guint32 len;
guint32 charset = 0;
guint32 charset_len = 0;
guint32 publicid;
guint32 publicid_index = 0;
guint32 publicid_len;
guint32 str_tbl;
guint32 str_tbl_len;
guint32 str_tbl_len_len = 0;
guint8 level = 0; /* WBXML recursion level */
const wbxml_decoding *content_map = NULL;
gchar *summary = NULL;
guint8 codepage_stag = 0;
guint8 codepage_attr = 0;
DebugLog(("dissect_wbxml: Dissecting packet %u\n", pinfo->fd->num));
/* WBXML format
*
* Version 1.0: version publicid strtbl BODY
* Version 1.x: version publicid charset strtbl BODY
*
* Last valid format: WBXML 1.3
*/
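/* Example header (hypothetical bytes): 02 0B 6A 00 decodes as
* version = 0x02 (WBXML 1.2), publicid = 0x0B (PROV 1.0), charset = 0x6A
* (106 = UTF-8 in the IANA MIBenum registry), and an empty string table.
*/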
switch ( version = tvb_get_guint8 (tvb, 0) ) {
case 0x00: /* WBXML/1.0 */
break;
case 0x01: /* WBXML/1.1 */
case 0x02: /* WBXML/1.2 */
case 0x03: /* WBXML/1.3 */
break;
default:
/* Put some information here, so that the user knows what's going on. */
/* Add summary to INFO column if it is enabled */
col_append_fstr(pinfo->cinfo, COL_INFO, " (Unknown WBXML version 0x%02x)", version);
ti = proto_tree_add_item (tree, proto_wbxml, tvb, 0, -1, ENC_NA);
proto_item_append_text(ti, ", Unknown version 0x%02x", version);
return;
}
/* In order to properly construct the packet summary,
* I need to read the entire WBXML header
* up to the string table length.
*/
/* Public ID */
publicid = tvb_get_guintvar(tvb, 1, &publicid_len);
if (! publicid) {
/* Public identifier in string table */
publicid_index = tvb_get_guintvar (tvb, 1+publicid_len, &len);
publicid_len += len;
}
offset = 1 + publicid_len;
/* Version-specific handling of Charset */
switch ( version ) {
case 0x00: /* WBXML/1.0 */
/* No charset */
break;
case 0x01: /* WBXML/1.1 */
case 0x02: /* WBXML/1.2 */
case 0x03: /* WBXML/1.3 */
/* Get charset */
charset = tvb_get_guintvar (tvb, offset, &charset_len);
offset += charset_len;
break;
default: /* Impossible, since we already returned earlier */
DISSECTOR_ASSERT_NOT_REACHED();
break;
}
/* String table: read string table length in bytes */
tvb_get_guintvar (tvb, offset, &str_tbl_len_len);
str_tbl = offset + str_tbl_len_len; /* Start of 1st string in string table */
/* Compose the summary line */
if ( publicid ) {
summary = wmem_strdup_printf(wmem_packet_scope(), "%s, Public ID: \"%s\"",
val_to_str_ext (version, &vals_wbxml_versions_ext, "(unknown 0x%x)"),
val_to_str_ext (publicid, &vals_wbxml_public_ids_ext, "(unknown 0x%x)"));
} else {
/* Read length of Public ID from string table */
len = tvb_strsize (tvb, str_tbl + publicid_index);
summary = wmem_strdup_printf(wmem_packet_scope(), "%s, Public ID: \"%s\"",
val_to_str_ext (version, &vals_wbxml_versions_ext, "(unknown 0x%x)"),
tvb_format_text (tvb, str_tbl + publicid_index, len - 1));
}
/* Add summary to INFO column if it is enabled */
col_append_fstr(pinfo->cinfo, COL_INFO, " (WBXML %s)", summary);
/* create display subtree for the protocol */
ti = proto_tree_add_item (tree, proto_wbxml, tvb, 0, -1, ENC_NA);
proto_item_append_text(ti, ", Version: %s", summary);
/*
* Now show the protocol subtree, if tree is set.
*/
if ( tree ) {
wbxml_tree = proto_item_add_subtree(ti, ett_wbxml);
/* WBXML Version */
proto_tree_add_uint (wbxml_tree, hf_wbxml_version,
tvb, 0, 1, version);
/* Public ID */
if (publicid) { /* Known Public ID */
proto_tree_add_uint(wbxml_tree, hf_wbxml_public_id_known,
tvb, 1, publicid_len, publicid);
} else { /* Public identifier in string table */
proto_tree_add_item (wbxml_tree, hf_wbxml_public_id_literal,
tvb, 1, publicid_len, ENC_ASCII|ENC_NA);
}
offset = 1 + publicid_len;
if ( version ) { /* Charset */
proto_tree_add_uint (wbxml_tree, hf_wbxml_charset,
tvb, 1 + publicid_len, charset_len, charset);
offset += charset_len;
}
str_tbl_len = tvb_get_guintvar (tvb, offset, &len);
str_tbl = offset + len; /* Start of 1st string in string table */
/* String Table */
ti = proto_tree_add_text(wbxml_tree,
tvb, offset, len + str_tbl_len, "String table: %u bytes",
str_tbl_len);
if (wbxml_tree && str_tbl_len) { /* Display string table as subtree */
wbxml_str_tbl_tree = proto_item_add_subtree (ti,
ett_wbxml_str_tbl);
show_wbxml_string_table (wbxml_str_tbl_tree, tvb,
str_tbl, str_tbl_len);
}
/* Data starts HERE */
offset += len + str_tbl_len;
/* The WBXML BODY starts here */
if (disable_wbxml_token_parsing) {
proto_tree_add_text (wbxml_tree, tvb, offset, -1,
"Data representation not shown "
"(edit WBXML preferences to show)");
return;
} /* Else: render the WBXML tokens */
ti = proto_tree_add_text (wbxml_tree, tvb, offset, -1,
"Data representation");
wbxml_content_tree = proto_item_add_subtree (ti, ett_wbxml_content);
/* The parse_wbxml_X() functions will process the content correctly,
* irrespective of the WBXML version used. For the WBXML body, this
* means that there is a different processing for the global token
* RESERVED_2 (WBXML 1.0) or OPAQUE (WBXML 1.x with x > 0). */
if (wbxml_tree) { /* Show only if visible */
if (override_content_map != NULL) {
content_map = override_content_map;
proto_item_append_text(ti,
" is based on: %s",
content_map->name);
} else {
/* Retrieve the content token mapping if available */
content_map = get_wbxml_decoding_from_public_id (publicid);
if (! content_map) {
content_map = get_wbxml_decoding_from_content_type(
pinfo->match_string, tvb, offset);
if (! content_map) {
proto_tree_add_text (wbxml_content_tree,
tvb, offset, -1,
"[Rendering of this content type"
" not (yet) supported]");
} else {
proto_item_append_text(ti,
" is based on Content-Type: %s "
"(chosen decoding: %s)",
pinfo->match_string, content_map->name);
}
}
}
if (content_map && skip_wbxml_token_mapping) {
proto_tree_add_text (wbxml_content_tree,
tvb, offset, -1,
"[Rendering of this content type"
" has been disabled "
"(edit WBXML preferences to enable)]");
content_map = NULL;
}
proto_tree_add_text (wbxml_content_tree, tvb,
offset, -1,
"Level | State | Codepage "
"| WBXML Token Description "
"| Rendering");
if (content_map) {
len = parse_wbxml_tag_defined (wbxml_content_tree,
tvb, offset, str_tbl, &level, &codepage_stag,
&codepage_attr, content_map);
} else {
/* Default: WBXML only, no interpretation of the content */
len = parse_wbxml_tag (wbxml_content_tree, tvb, offset,
str_tbl, &level, &codepage_stag, &codepage_attr);
}
}
return;
}
}
/* Parse and display the WBXML string table (in a 3-column table format).
* This function displays:
* - the offset in the string table,
* - the length of the string
* - the string.
*/
static void
show_wbxml_string_table (proto_tree *tree, tvbuff_t *tvb, guint32 str_tbl,
guint32 str_tbl_len)
{
guint32 off = str_tbl;
guint32 len = 0;
guint32 end = str_tbl + str_tbl_len;
proto_tree_add_text (tree, tvb, off, end,
"Start | Length | String");
while (off < end) {
len = tvb_strsize (tvb, off);
proto_tree_add_text (tree, tvb, off, len,
"%6d | %6d | '%s'",
off - str_tbl, len,
tvb_format_text (tvb, off, len-1));
off += len;
}
}
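/* For illustration (hypothetical table contents): a 7-byte string table
* holding "abc\0de\0" yields the rows (0 | 4 | 'abc') and (4 | 3 | 'de'),
* as tvb_strsize() includes the terminating NUL in the reported length.
*/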
/* Indentation code is based on a static const array of space characters.
* At least one single space is returned */
static const char indent_buffer[514] = " "
" "
" "
" "
" "
" "
" "
" "
" "
; /* Generate XML indentation (length = 1 + 2 * 256 + 1 for '\0') */
static const char * Indent (guint8 level) {
return indent_buffer + (512 - 2 * (level));
}
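/* E.g. Indent(0) points at the single space just before the NUL, and
* Indent(3) yields 2*3 + 1 = 7 spaces: the buffer holds 513 spaces and the
* returned pointer starts 2*level characters before index 512.
*/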
/********************
* WBXML tag tokens *
********************
*
* Bit Mask : Example
* -------------------
* 00.. .... : <tag />
*
* 01.. .... : <tag>
* CONTENT
* </tag>
*
* 10.. .... : <tag
* attribute1="value1"
* attribute2="value2"
* />
*
* 11.. .... : <tag
* attribute1="value1"
* attribute2="value2"
* >
* CONTENT
* </tag>
*
* NOTES
* - An XML PI is parsed as an attribute list (same syntax).
* - A code page switch only applies to the single token that follows.
*/
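/* For example, byte 0xC9 in the STAG state is known tag 0x09 with both the
* attribute bit (0x80) and the content bit (0x40) set, whereas 0x09 on its
* own would encode the same tag as an empty element without attributes.
*/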
/* This function parses the WBXML and maps known token interpretations
* to the WBXML tokens. As a result, the original XML document can be
* recreated. Indentation is generated in order to ease reading.
*
* Attribute parsing is done in parse_wbxml_attribute_list_defined().
*
* The wbxml_decoding entry *map contains the actual token mapping.
*
* NOTE: In order to parse the content, some recursion is required.
* However, for performance reasons, recursion has been avoided
* where possible (tags without content within tags with content).
* This is achieved by means of the parsing_tag_content and tag_save*
* variables.
*
* NOTE: See above for known token mappings.
*
* NOTE: As tags can be opened and closed, a tag representation lookup
* may happen once or twice for a given tag. For efficiency reasons,
* the literal tag value is stored and used throughout the code.
* With the introduction of code page support, this solution is robust
* as the lookup only occurs once, removing the need for storage of
* the used code page.
*/
static guint32
parse_wbxml_tag_defined (proto_tree *tree, tvbuff_t *tvb, guint32 offset,
guint32 str_tbl, guint8 *level, guint8 *codepage_stag, guint8 *codepage_attr,
const wbxml_decoding *map)
{
guint32 tvb_len = tvb_reported_length (tvb);
guint32 off = offset, last_off;
guint32 len;
guint str_len;
guint32 ent;
guint32 idx;
guint8 peek;
guint32 tag_len; /* Length of the index (uintvar) from a LITERAL tag */
guint8 tag_save_known = 0; /* Will contain peek & 0x3F (tag identity) */
guint8 tag_new_known = 0; /* Will contain peek & 0x3F (tag identity) */
const char *tag_save_literal; /* Will contain the LITERAL tag identity */
const char *tag_new_literal; /* Will contain the LITERAL tag identity */
guint8 parsing_tag_content = FALSE; /* Are we parsing content from a
tag with content: <x>Content</x>
The initial state is FALSE.
This state will trigger recursion. */
tag_save_literal = NULL; /* Prevents compiler warning */
DebugLog(("parse_wbxml_tag_defined (level = %u, offset = %u)\n", *level, offset));
last_off = off;
while (off < tvb_len) {
peek = tvb_get_guint8 (tvb, off);
DebugLog(("STAG: (top of while) level = %3u, peek = 0x%02X, off = %u, tvb_len = %u\n", *level, peek, off, tvb_len));
if ((peek & 0x3F) < 4) switch (peek) { /* Global tokens in state = STAG
but not the LITERAL tokens */
case 0x00: /* SWITCH_PAGE */
*codepage_stag = tvb_get_guint8 (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 2,
" | Tag | T -->%3d "
"| SWITCH_PAGE (Tag code page) "
"|",
*codepage_stag);
off += 2;
break;
case 0x01: /* END: only possible for Tag with Content */
if (tag_save_known) { /* Known TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Known Tag 0x%02X) "
"| %s</%s>",
*level, *codepage_stag,
tag_save_known, Indent (*level),
tag_save_literal); /* We already looked it up! */
} else { /* Literal TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s</%s>",
*level, *codepage_stag, Indent (*level),
tag_save_literal ? tag_save_literal : "");
}
(*level)--;
off++;
/* Reset code page: not needed as return from recursion */
DebugLog(("STAG: level = %u, Return: len = %u\n", *level, off - offset));
return (off - offset);
case 0x02: /* ENTITY */
ent = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| ENTITY "
"| %s'&#%u;'",
*level, *codepage_stag, Indent (*level), ent);
off += 1+len;
break;
case 0x03: /* STR_I */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_I (Inline string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent(*level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x40: /* EXT_I_0 */
case 0x41: /* EXT_I_1 */
case 0x42: /* EXT_I_2 */
/* Extension tokens */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_I_%1x (Extension Token) "
"| %s(%s: \'%s\')",
*level, *codepage_stag,
peek & 0x0f, Indent (*level),
map_token (map->global, 0, peek),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x43: /* PI */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| PI (XML Processing Instruction) "
"| %s<?xml",
*level, *codepage_stag, Indent (*level));
len = parse_wbxml_attribute_list_defined (tree, tvb, off,
str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (PI) "
"| %s?>",
*level, *codepage_stag, Indent (*level));
break;
case 0x80: /* EXT_T_0 */
case 0x81: /* EXT_T_1 */
case 0x82: /* EXT_T_2 */
/* Extension tokens */
idx = tvb_get_guintvar (tvb, off+1, &len);
{ char *s;
if (map->ext_t[peek & 0x03])
s = (map->ext_t[peek & 0x03])(tvb, idx, str_tbl);
else
s = wmem_strdup_printf(wmem_packet_scope(), "EXT_T_%1x (%s)", peek & 0x03,
map_token (map->global, 0, peek));
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_T_%1x (Extension Token) "
"| %s%s",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
s);
}
off += 1+len;
break;
case 0x83: /* STR_T */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_T (Tableref string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent (*level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
case 0xC0: /* EXT_0 */
case 0xC1: /* EXT_1 */
case 0xC2: /* EXT_2 */
/* Extension tokens */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| EXT_%1x (Extension Token) "
"| %s(%s)",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
map_token (map->global, 0, peek));
off++;
break;
case 0xC3: /* OPAQUE - WBXML 1.1 and newer */
if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */
char *str;
if (tag_save_known) { /* Known tag */
if (map->opaque_binary_tag) {
str = map->opaque_binary_tag(tvb, off + 1,
tag_save_known, *codepage_stag, &len);
} else {
str = default_opaque_binary_tag(tvb, off + 1,
tag_save_known, *codepage_stag, &len);
}
} else { /* LITERAL tag */
if (map->opaque_literal_tag) {
str = map->opaque_literal_tag(tvb, off + 1,
tag_save_literal, *codepage_stag, &len);
} else {
str = default_opaque_literal_tag(tvb, off + 1,
tag_save_literal, *codepage_stag, &len);
}
}
proto_tree_add_text (tree, tvb, off, 1 + len,
" %3d | Tag | T %3d "
"| OPAQUE (Opaque data) "
"| %s%s",
*level, *codepage_stag, Indent (*level), str);
off += 1 + len;
} else { /* WBXML 1.0 - RESERVED_2 token (invalid) */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| RESERVED_2 (Invalid Token!) "
"| WBXML 1.0 parsing stops here.",
*level, *codepage_stag);
/* Stop processing as it is impossible to parse now */
off = tvb_len;
DebugLog(("STAG: level = %u, Return: len = %u\n", *level, off - offset));
return (off - offset);
}
break;
/* No default clause, as all cases have been treated */
} else { /* LITERAL or Known TAG */
/* We must store the initial tag, and also retrieve the new tag.
* For efficiency reasons, we store the literal tag representation
* for known tags too, so we can easily close the tag without the
* need of a new lookup and avoiding storage of token codepage.
*
* There are 4 possibilities:
*
* 1. Known tag followed by a known tag
* 2. Known tag followed by a LITERAL tag
* 3. LITERAL tag followed by Known tag
* 4. LITERAL tag followed by LITERAL tag
*/
/* Store the new tag */
tag_len = 0;
if ((peek & 0x3F) == 4) { /* LITERAL */
DebugLog(("STAG: LITERAL tag (peek = 0x%02X, off = %u) - TableRef follows!\n", peek, off));
idx = tvb_get_guintvar (tvb, off+1, &tag_len);
str_len = tvb_strsize (tvb, str_tbl+idx);
tag_new_literal = (const gchar*)tvb_get_ptr (tvb, str_tbl+idx, str_len);
tag_new_known = 0; /* invalidate known tag_new */
} else { /* Known tag */
tag_new_known = peek & 0x3F;
tag_new_literal = map_token (map->tags, *codepage_stag,
tag_new_known);
/* Store the looked-up tag name string */
}
/* Parsing of TAG starts HERE */
if (peek & 0x40) { /* Content present */
/* Content follows
* [!] An explicit END token is expected in these cases!
* ==> Recursion possible if we encounter a tag with content;
* recursion will return at the explicit END token.
*/
if (parsing_tag_content) { /* Recurse */
DebugLog(("STAG: Tag in Tag - RECURSE! (off = %u)\n", off));
/* Do not process the attribute list:
* recursion will take care of it */
(*level)++;
len = parse_wbxml_tag_defined (tree, tvb, off, str_tbl,
level, codepage_stag, codepage_attr, map);
off += len;
} else { /* Now we will have content to parse */
/* Save the start tag so we can properly close it later. */
if ((peek & 0x3F) == 4) { /* Literal tag */
tag_save_literal = tag_new_literal;
tag_save_known = 0;
} else { /* Known tag */
tag_save_known = tag_new_known;
tag_save_literal = tag_new_literal;
/* The last statement avoids needless lookups */
}
/* Process the attribute list if present */
if (peek & 0x80) { /* Content and Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (AC) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_AC (Literal tag) (AC) "
"| %s<%s",
*level, *codepage_stag, Indent (*level), tag_new_literal);
off += 1 + tag_len;
}
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (attribute list) "
"| %s>",
*level, *codepage_stag, Indent (*level));
} else { /* Content, no Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (.C) "
"| %s<%s>",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_C (Literal Tag) (.C) "
"| %s<%s>",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
/* The data that follows in the parsing process
* represents content for the opening tag
* we've just processed in the lines above.
* Next time we encounter a tag with content: recurse
*/
parsing_tag_content = TRUE;
DebugLog(("Tag in Tag - No recursion this time! (off = %u)\n", off));
}
} else { /* No Content */
DebugLog(("<Tag/> in Tag - No recursion! (off = %u)\n", off));
(*level)++;
if (peek & 0x80) { /* No Content, Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (A.) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off > tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Known Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_A (Literal Tag) (A.) "
"| %s<%s",
*level, *codepage_stag, Indent (*level), tag_new_literal);
off += 1 + tag_len;
len = parse_wbxml_attribute_list_defined (tree, tvb,
off, str_tbl, *level, codepage_attr, map);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n", *level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
}
} else { /* No Content, No Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02x (..) "
"| %s<%s />",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL (Literal Tag) (..) "
"| %s<%s />",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
(*level)--;
/* TODO: Do I have to reset code page here? */
}
} /* if ((peek & 0x3F) >= 4) */
if (off < last_off) {
THROW(ReportedBoundsError);
}
last_off = off;
} /* while */
DebugLog(("STAG: level = %u, Return: len = %u (end of function body)\n", *level, off - offset));
return (off - offset);
}
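/* Explanatory sketch (a hypothetical helper, not part of the dissector):
* the multi-byte integers read via tvb_get_guintvar() above are WSP
* "uintvar" values - seven payload bits per byte, most significant
* group first, with the top bit of each byte set while more bytes
* follow. A minimal decoder, assuming a well-formed buffer:
*/
static guint32 decode_uintvar_sketch(const guint8 *buf, guint *consumed)
{
guint32 value = 0;
guint i = 0;
guint8 b;
do {
b = buf[i++]; /* fetch the next encoded byte */
value = (value << 7) | (b & 0x7F); /* append its 7 payload bits */
} while (b & 0x80); /* continuation bit still set? */
*consumed = i;
return value;
}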
/* This function performs the WBXML decoding as in parse_wbxml_tag_defined()
* but this time no WBXML mapping is performed.
*
* Attribute parsing is done in parse_wbxml_attribute_list().
*/
static guint32
parse_wbxml_tag (proto_tree *tree, tvbuff_t *tvb, guint32 offset,
guint32 str_tbl, guint8 *level,
guint8 *codepage_stag, guint8 *codepage_attr)
{
guint32 tvb_len = tvb_reported_length (tvb);
guint32 off = offset, last_off;
guint32 len;
guint str_len;
guint32 ent;
guint32 idx;
guint8 peek;
guint32 tag_len; /* Length of the idx (uintvar) from a LITERAL tag */
guint8 tag_save_known = 0; /* Will contain peek & 0x3F (tag identity) */
guint8 tag_new_known = 0; /* Will contain peek & 0x3F (tag identity) */
const char *tag_save_literal; /* Will contain the LITERAL tag identity */
const char *tag_new_literal; /* Will contain the LITERAL tag identity */
char *tag_save_buf = NULL; /* Will contain "Tag_0x%02X" */
char *tag_new_buf = NULL; /* Will contain "Tag_0x%02X" */
guint8 parsing_tag_content = FALSE; /* Are we parsing content from a
tag with content: <x>Content</x>
The initial state is FALSE.
This state will trigger recursion. */
tag_save_literal = NULL; /* Prevents compiler warning */
DebugLog(("parse_wbxml_tag (level = %u, offset = %u)\n", *level, offset));
last_off = off;
while (off < tvb_len) {
peek = tvb_get_guint8 (tvb, off);
DebugLog(("STAG: (top of while) level = %3u, peek = 0x%02X, off = %u, tvb_len = %u\n", *level, peek, off, tvb_len));
if ((peek & 0x3F) < 4) switch (peek) { /* Global tokens in state = STAG
but not the LITERAL tokens */
case 0x00: /* SWITCH_PAGE */
*codepage_stag = tvb_get_guint8 (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 2,
" | Tag | T -->%3d "
"| SWITCH_PAGE (Tag code page) "
"|",
*codepage_stag);
off += 2;
break;
case 0x01: /* END: only possible for Tag with Content */
if (tag_save_known) { /* Known TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Known Tag 0x%02X) "
"| %s</%s>",
*level, *codepage_stag, tag_save_known,
Indent (*level),
tag_save_literal); /* We already looked it up! */
} else { /* Literal TAG */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s</%s>",
*level, *codepage_stag, Indent (*level),
tag_save_literal ? tag_save_literal : "");
}
(*level)--;
off++;
/* Reset code page: not needed as return from recursion */
DebugLog(("STAG: level = %u, Return: len = %u\n",
*level, off - offset));
return (off - offset);
case 0x02: /* ENTITY */
ent = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| ENTITY "
"| %s'&#%u;'",
*level, *codepage_stag, Indent (*level), ent);
off += 1+len;
break;
case 0x03: /* STR_I */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_I (Inline string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent(*level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x40: /* EXT_I_0 */
case 0x41: /* EXT_I_1 */
case 0x42: /* EXT_I_2 */
/* Extension tokens */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_I_%1x (Extension Token) "
"| %s(Inline string extension: \'%s\')",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x43: /* PI */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| PI (XML Processing Instruction) "
"| %s<?xml",
*level, *codepage_stag, Indent (*level));
len = parse_wbxml_attribute_list (tree, tvb, off, str_tbl,
*level, codepage_attr);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (PI) "
"| %s?>",
*level, *codepage_stag, Indent (*level));
break;
case 0x80: /* EXT_T_0 */
case 0x81: /* EXT_T_1 */
case 0x82: /* EXT_T_2 */
/* Extension tokens */
idx = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_T_%1x (Extension Token) "
"| %s(Extension Token, integer value: %u)",
*level, *codepage_stag, peek & 0x0f, Indent (*level),
idx);
off += 1+len;
break;
case 0x83: /* STR_T */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| STR_T (Tableref string) "
"| %s\'%s\'",
*level, *codepage_stag, Indent (*level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
case 0xC0: /* EXT_0 */
case 0xC1: /* EXT_1 */
case 0xC2: /* EXT_2 */
/* Extension tokens */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| EXT_%1x (Extension Token) "
"| %s(Single-byte extension)",
*level, *codepage_stag, peek & 0x0f, Indent (*level));
off++;
break;
case 0xC3: /* OPAQUE - WBXML 1.1 and newer */
if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */
idx = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1 + len + idx,
" %3d | Tag | T %3d "
"| OPAQUE (Opaque data) "
"| %s(%d bytes of opaque data)",
*level, *codepage_stag, Indent (*level), idx);
off += 1+len+idx;
} else { /* WBXML 1.0 - RESERVED_2 token (invalid) */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| RESERVED_2 (Invalid Token!) "
"| WBXML 1.0 parsing stops here.",
*level, *codepage_stag);
/* Stop processing as it is impossible to parse now */
off = tvb_len;
DebugLog(("STAG: level = %u, Return: len = %u\n",
*level, off - offset));
return (off - offset);
}
break;
/* No default clause, as all cases have been treated */
} else { /* LITERAL or Known TAG */
/* We must store the initial tag, and also retrieve the new tag.
* For efficiency reasons, we store the literal tag representation
* for known tags too, so we can easily close the tag without the
* need of a new lookup and avoiding storage of token codepage.
*
* There are 4 possibilities:
*
* 1. Known tag followed by a known tag
* 2. Known tag followed by a LITERAL tag
* 3. LITERAL tag followed by Known tag
* 4. LITERAL tag followed by LITERAL tag
*/
/* Store the new tag */
tag_len = 0;
if ((peek & 0x3F) == 4) { /* LITERAL */
DebugLog(("STAG: LITERAL tag (peek = 0x%02X, off = %u)"
" - TableRef follows!\n", peek, off));
idx = tvb_get_guintvar (tvb, off+1, &tag_len);
str_len = tvb_strsize (tvb, str_tbl+idx);
tag_new_literal = (const gchar*)tvb_get_ptr (tvb, str_tbl+idx, str_len);
tag_new_known = 0; /* invalidate known tag_new */
} else { /* Known tag */
tag_new_known = peek & 0x3F;
tag_new_buf=wmem_strdup_printf(wmem_packet_scope(), "Tag_0x%02X",
tag_new_known);
tag_new_literal = tag_new_buf;
/* Stored looked up tag name string */
}
/* Parsing of TAG starts HERE */
if (peek & 0x40) { /* Content present */
/* Content follows
* [!] An explicit END token is expected in these cases!
* ==> Recursion possible if we encounter a tag with content;
* recursion will return at the explicit END token.
*/
if (parsing_tag_content) { /* Recurse */
DebugLog(("STAG: Tag in Tag - RECURSE! (off = %u)\n", off));
/* Do not process the attribute list:
* recursion will take care of it */
(*level)++;
len = parse_wbxml_tag (tree, tvb, off, str_tbl, level,
codepage_stag, codepage_attr);
off += len;
} else { /* Now we will have content to parse */
/* Save the start tag so we can properly close it later. */
if ((peek & 0x3F) == 4) { /* Literal tag */
tag_save_literal = tag_new_literal;
tag_save_known = 0;
} else { /* Known tag */
tag_save_known = tag_new_known;
tag_save_buf=wmem_strdup_printf(wmem_packet_scope(), "Tag_0x%02X",
tag_new_known);
tag_save_literal = tag_save_buf;
/* The last statement avoids needless lookups */
}
/* Process the attribute list if present */
if (peek & 0x80) { /* Content and Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (AC) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_AC (Literal tag) (AC) "
"| %s<%s",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
len = parse_wbxml_attribute_list (tree, tvb,
off, str_tbl, *level, codepage_attr);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: "
"len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (attribute list) "
"| %s>",
*level, *codepage_stag, Indent (*level));
} else { /* Content, no Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (.C) "
"| %s<%s>",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_C (Literal Tag) (.C) "
"| %s<%s>",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
/* The data that follows in the parsing process
* represents content for the opening tag
* we've just processed in the lines above.
* Next time we encounter a tag with content: recurse
*/
parsing_tag_content = TRUE;
DebugLog(("Tag in Tag - No recursion this time! "
"(off = %u)\n", off));
}
} else { /* No Content */
DebugLog(("<Tag/> in Tag - No recursion! (off = %u)\n", off));
(*level)++;
if (peek & 0x80) { /* No Content, Attribute list present */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02X (A.) "
"| %s<%s",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
len = parse_wbxml_attribute_list (tree, tvb,
off, str_tbl, *level, codepage_attr);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: "
"len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Known Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL_A (Literal Tag) (A.) "
"| %s<%s",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
len = parse_wbxml_attribute_list (tree, tvb,
off, str_tbl, *level, codepage_attr);
/* Check that there is still room in packet */
off += len;
if (off >= tvb_len) {
DebugLog(("STAG: level = %u, ThrowException: "
"len = %u (short frame)\n",
*level, off - offset));
/*
* TODO - Do we need to free g_malloc()ed memory?
*/
THROW(ReportedBoundsError);
}
proto_tree_add_text (tree, tvb, off-1, 1,
" %3d | Tag | T %3d "
"| END (Literal Tag) "
"| %s/>",
*level, *codepage_stag, Indent (*level));
}
} else { /* No Content, No Attribute list */
if (tag_new_known) { /* Known tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| Known Tag 0x%02x (..) "
"| %s<%s />",
*level, *codepage_stag, tag_new_known,
Indent (*level), tag_new_literal);
/* Tag string already looked up earlier! */
off++;
} else { /* LITERAL tag */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Tag | T %3d "
"| LITERAL (Literal Tag) (..) "
"| %s<%s />",
*level, *codepage_stag, Indent (*level),
tag_new_literal);
off += 1 + tag_len;
}
}
(*level)--;
/* TODO: Do I have to reset code page here? */
}
} /* if ((peek & 0x3F) >= 4) */
if (off < last_off) {
THROW(ReportedBoundsError);
}
last_off = off;
} /* while */
DebugLog(("STAG: level = %u, Return: len = %u (end of function body)\n",
*level, off - offset));
return (off - offset);
}
/**************************
* WBXML Attribute tokens *
**************************
* Bit Mask : Example
* -------------------
* 0... .... : attr= (attribute name)
* href='http://' (attribute name with start of attribute value)
* 1... .... : 'www.' (attribute value, or part of it)
*
*/
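/* Minimal sketch of the classification implied by the bit mask above
* (a hypothetical helper, added purely for illustration): the high bit
* of a known attribute token separates attrStart from attrValue.
*/
static gboolean attr_token_is_value_sketch(guint8 peek)
{
/* 1... .... => attribute value (or part of it);
* 0... .... => attribute name, possibly with the start of its value */
return (peek & 0x80) != 0;
}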
/* This function parses the WBXML and maps known token interpretations
* to the WBXML tokens. As a result, the original XML document can be
* recreated. Indentation is generated in order to ease reading.
*
* This function performs attribute list parsing.
*
* The wbxml_decoding entry *map contains the actual token mapping.
*
* NOTE: See above for known token mappings.
*/
static guint32
parse_wbxml_attribute_list_defined (proto_tree *tree, tvbuff_t *tvb,
guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr,
const wbxml_decoding *map)
{
guint32 tvb_len = tvb_reported_length (tvb);
guint32 off = offset, last_off;
guint32 len;
guint str_len;
guint32 ent;
guint32 idx;
guint8 peek;
guint8 attr_save_known = 0; /* Will contain peek & 0x3F (attr identity) */
const char *attr_save_literal = NULL; /* Will contain the LITERAL attr identity */
DebugLog(("parse_wbxml_attr_defined (level = %u, offset = %u)\n",
level, offset));
/* Parse attributes */
last_off = off;
while (off < tvb_len) {
peek = tvb_get_guint8 (tvb, off);
DebugLog(("ATTR: (top of while) level = %3u, peek = 0x%02X, "
"off = %u, tvb_len = %u\n", level, peek, off, tvb_len));
if ((peek & 0x3F) < 5) switch (peek) { /* Global tokens
in state = ATTR */
case 0x00: /* SWITCH_PAGE */
*codepage_attr = tvb_get_guint8 (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 2,
" | Attr | A -->%3d "
"| SWITCH_PAGE (Attr code page) |",
*codepage_attr);
off += 2;
break;
case 0x01: /* END */
/* BEWARE
* The Attribute END token means either ">" or "/>"
* and as a consequence both must be treated separately.
* This is done in the TAG state parser.
*/
off++;
DebugLog(("ATTR: level = %u, Return: len = %u\n",
level, off - offset));
return (off - offset);
case 0x02: /* ENTITY */
ent = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| ENTITY "
"| %s'&#%u;'",
level, *codepage_attr, Indent (level), ent);
off += 1+len;
break;
case 0x03: /* STR_I */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| STR_I (Inline string) "
"| %s\'%s\'",
level, *codepage_attr, Indent (level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x04: /* LITERAL */
/* ALWAYS means the start of a new attribute,
* and may only contain the NAME of the attribute.
*/
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
attr_save_known = 0;
attr_save_literal = tvb_format_text (tvb,
str_tbl+idx, str_len-1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| LITERAL (Literal Attribute) "
"| %s<%s />",
level, *codepage_attr, Indent (level),
attr_save_literal);
off += 1+len;
break;
case 0x40: /* EXT_I_0 */
case 0x41: /* EXT_I_1 */
case 0x42: /* EXT_I_2 */
/* Extension tokens */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| EXT_I_%1x (Extension Token) "
"| %s(%s: \'%s\')",
level, *codepage_attr, peek & 0x0f, Indent (level),
map_token (map->global, 0, peek),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
/* 0x43 impossible in ATTR state */
/* 0x44 impossible in ATTR state */
case 0x80: /* EXT_T_0 */
case 0x81: /* EXT_T_1 */
case 0x82: /* EXT_T_2 */
/* Extension tokens */
idx = tvb_get_guintvar (tvb, off+1, &len);
{ char *s;
if (map->ext_t[peek & 0x03])
s = (map->ext_t[peek & 0x03])(tvb, idx, str_tbl);
else
s = wmem_strdup_printf(wmem_packet_scope(), "EXT_T_%1x (%s)", peek & 0x03,
map_token (map->global, 0, peek));
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Tag | T %3d "
"| EXT_T_%1x (Extension Token) "
"| %s%s)",
level, *codepage_attr, peek & 0x0f, Indent (level),
s);
}
off += 1+len;
break;
case 0x83: /* STR_T */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| STR_T (Tableref string) "
"| %s\'%s\'",
level, *codepage_attr, Indent (level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
/* 0x84 impossible in ATTR state */
case 0xC0: /* EXT_0 */
case 0xC1: /* EXT_1 */
case 0xC2: /* EXT_2 */
/* Extension tokens */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| EXT_%1x (Extension Token) "
"| %s(%s)",
level, *codepage_attr, peek & 0x0f, Indent (level),
map_token (map->global, 0, peek));
off++;
break;
case 0xC3: /* OPAQUE - WBXML 1.1 and newer */
if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */
char *str;
if (attr_save_known) { /* Known attribute */
if (map->opaque_binary_attr) {
str = map->opaque_binary_attr(tvb, off + 1,
attr_save_known, *codepage_attr, &len);
} else {
str = default_opaque_binary_attr(tvb, off + 1,
attr_save_known, *codepage_attr, &len);
}
} else { /* LITERAL attribute */
if (map->opaque_literal_attr) {
str = map->opaque_literal_attr(tvb, off + 1,
attr_save_literal, *codepage_attr, &len);
} else {
str = default_opaque_literal_attr(tvb, off + 1,
attr_save_literal, *codepage_attr, &len);
}
}
proto_tree_add_text (tree, tvb, off, 1 + len,
" %3d | Attr | A %3d "
"| OPAQUE (Opaque data) "
"| %s%s",
level, *codepage_attr, Indent (level), str);
off += 1 + len;
} else { /* WBXML 1.0 - RESERVED_2 token (invalid) */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| RESERVED_2 (Invalid Token!) "
"| WBXML 1.0 parsing stops here.",
level, *codepage_attr);
/* Stop processing as it is impossible to parse now */
off = tvb_len;
DebugLog(("ATTR: level = %u, Return: len = %u\n",
level, off - offset));
return (off - offset);
}
break;
/* 0xC4 impossible in ATTR state */
default:
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| %-10s (Invalid Token!) "
"| WBXML parsing stops here.",
level, *codepage_attr,
val_to_str_ext (peek, &vals_wbxml1x_global_tokens_ext, "(unknown 0x%x)"));
/* Move to end of buffer */
off = tvb_len;
break;
} else { /* Known attribute token */
if (peek & 0x80) { /* attrValue */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| Known attrValue 0x%02X "
"| %s%s",
level, *codepage_attr, peek & 0x7f, Indent (level),
map_token (map->attrValue, *codepage_attr, peek));
off++;
} else { /* attrStart */
attr_save_known = peek & 0x7f;
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| Known attrStart 0x%02X "
"| %s%s",
level, *codepage_attr, attr_save_known, Indent (level),
map_token (map->attrStart, *codepage_attr, peek));
off++;
}
}
if (off < last_off) {
THROW(ReportedBoundsError);
}
last_off = off;
} /* End WHILE */
DebugLog(("ATTR: level = %u, Return: len = %u (end of function body)\n",
level, off - offset));
return (off - offset);
}
/* This function performs the WBXML attribute decoding as in
* parse_wbxml_attribute_list_defined() but this time no WBXML mapping
* is performed.
*
* This function performs attribute list parsing.
*
* NOTE: Code page switches not yet processed in the code!
*/
static guint32
parse_wbxml_attribute_list (proto_tree *tree, tvbuff_t *tvb,
guint32 offset, guint32 str_tbl, guint8 level, guint8 *codepage_attr)
{
guint32 tvb_len = tvb_reported_length (tvb);
guint32 off = offset, last_off;
guint32 len;
guint str_len;
guint32 ent;
guint32 idx;
guint8 peek;
DebugLog(("parse_wbxml_attr (level = %u, offset = %u)\n", level, offset));
/* Parse attributes */
last_off = off;
while (off < tvb_len) {
peek = tvb_get_guint8 (tvb, off);
DebugLog(("ATTR: (top of while) level = %3u, peek = 0x%02X, "
"off = %u, tvb_len = %u\n", level, peek, off, tvb_len));
if ((peek & 0x3F) < 5) switch (peek) { /* Global tokens
in state = ATTR */
case 0x00: /* SWITCH_PAGE */
*codepage_attr = tvb_get_guint8 (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 2,
" | Attr | A -->%3d "
"| SWITCH_PAGE (Attr code page) |",
*codepage_attr);
off += 2;
break;
case 0x01: /* END */
/* BEWARE
* The Attribute END token means either ">" or "/>"
* and as a consequence both must be treated separately.
* This is done in the TAG state parser.
*/
off++;
DebugLog(("ATTR: level = %u, Return: len = %u\n",
level, off - offset));
return (off - offset);
case 0x02: /* ENTITY */
ent = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| ENTITY "
"| %s'&#%u;'",
level, *codepage_attr, Indent (level), ent);
off += 1+len;
break;
case 0x03: /* STR_I */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| STR_I (Inline string) "
"| %s\'%s\'",
level, *codepage_attr, Indent (level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
case 0x04: /* LITERAL */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| LITERAL (Literal Attribute) "
"| %s<%s />",
level, *codepage_attr, Indent (level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
case 0x40: /* EXT_I_0 */
case 0x41: /* EXT_I_1 */
case 0x42: /* EXT_I_2 */
/* Extension tokens */
len = tvb_strsize (tvb, off+1);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| EXT_I_%1x (Extension Token) "
"| %s(Inline string extension: \'%s\')",
level, *codepage_attr, peek & 0x0f, Indent (level),
tvb_format_text (tvb, off+1, len-1));
off += 1+len;
break;
/* 0x43 impossible in ATTR state */
/* 0x44 impossible in ATTR state */
case 0x80: /* EXT_T_0 */
case 0x81: /* EXT_T_1 */
case 0x82: /* EXT_T_2 */
/* Extension tokens */
idx = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| EXT_T_%1x (Extension Token) "
"| %s(Extension Token, integer value: %u)",
level, *codepage_attr, peek & 0x0f, Indent (level),
idx);
off += 1+len;
break;
case 0x83: /* STR_T */
idx = tvb_get_guintvar (tvb, off+1, &len);
str_len = tvb_strsize (tvb, str_tbl+idx);
proto_tree_add_text (tree, tvb, off, 1+len,
" %3d | Attr | A %3d "
"| STR_T (Tableref string) "
"| %s\'%s\'",
level, *codepage_attr, Indent (level),
tvb_format_text (tvb, str_tbl+idx, str_len-1));
off += 1+len;
break;
/* 0x84 impossible in ATTR state */
case 0xC0: /* EXT_0 */
case 0xC1: /* EXT_1 */
case 0xC2: /* EXT_2 */
/* Extension tokens */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| EXT_%1x (Extension Token) "
"| %s(Single-byte extension)",
level, *codepage_attr, peek & 0x0f, Indent (level));
off++;
break;
case 0xC3: /* OPAQUE - WBXML 1.1 and newer */
if (tvb_get_guint8 (tvb, 0)) { /* WBXML 1.x (x > 0) */
idx = tvb_get_guintvar (tvb, off+1, &len);
proto_tree_add_text (tree, tvb, off, 1 + len + idx,
" %3d | Attr | A %3d "
"| OPAQUE (Opaque data) "
"| %s(%d bytes of opaque data)",
level, *codepage_attr, Indent (level), idx);
off += 1+len+idx;
} else { /* WBXML 1.0 - RESERVED_2 token (invalid) */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| RESERVED_2 (Invalid Token!) "
"| WBXML 1.0 parsing stops here.",
level, *codepage_attr);
/* Stop processing as it is impossible to parse now */
off = tvb_len;
DebugLog(("ATTR: level = %u, Return: len = %u\n",
level, off - offset));
return (off - offset);
}
break;
/* 0xC4 impossible in ATTR state */
default:
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| %-10s (Invalid Token!) "
"| WBXML parsing stops here.",
level, *codepage_attr,
val_to_str_ext (peek, &vals_wbxml1x_global_tokens_ext, "(unknown 0x%x)"));
/* Move to end of buffer */
off = tvb_len;
break;
} else { /* Known attribute token */
if (peek & 0x80) { /* attrValue */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| Known attrValue 0x%02X "
"| %sattrValue_0x%02X",
level, *codepage_attr, peek & 0x7f, Indent (level),
peek);
off++;
} else { /* attrStart */
proto_tree_add_text (tree, tvb, off, 1,
" %3d | Attr | A %3d "
"| Known attrStart 0x%02X "
"| %sattrStart_0x%02X",
level, *codepage_attr, peek & 0x7f, Indent (level),
peek);
off++;
}
}
if (off < last_off) {
THROW(ReportedBoundsError);
}
last_off = off;
} /* End WHILE */
DebugLog(("ATTR: level = %u, Return: len = %u (end of function body)\n",
level, off - offset));
return (off - offset);
}
/****************** Register the protocol with Wireshark ******************/
/* This format is required because a script is used to build the C function
* that calls the protocol registration. */
void
proto_register_wbxml(void)
{
module_t *wbxml_module; /* WBXML Preferences */
/* Setup list of header fields. */
static hf_register_info hf[] = {
{ &hf_wbxml_version,
{ "Version",
"wbxml.version",
FT_UINT8, BASE_HEX|BASE_EXT_STRING,
&vals_wbxml_versions_ext, 0x00,
"WBXML Version", HFILL }
},
{ &hf_wbxml_public_id_known,
{ "Public Identifier (known)",
"wbxml.public_id.known",
FT_UINT32, BASE_HEX|BASE_EXT_STRING,
&vals_wbxml_public_ids_ext, 0x00,
"WBXML Known Public Identifier (integer)", HFILL }
},
{ &hf_wbxml_public_id_literal,
{ "Public Identifier (literal)",
"wbxml.public_id.literal",
FT_STRING, BASE_NONE,
NULL, 0x00,
"WBXML Literal Public Identifier (text string)", HFILL }
},
{ &hf_wbxml_charset,
{ "Character Set",
"wbxml.charset",
FT_UINT32, BASE_HEX|BASE_EXT_STRING,
&wap_mib_enum_vals_character_sets_ext, 0x00,
"WBXML Character Set", HFILL }
},
};
/* Setup protocol subtree array */
static gint *ett[] = {
&ett_wbxml,
&ett_wbxml_str_tbl,
&ett_wbxml_content,
};
/* Register the protocol name and description */
proto_wbxml = proto_register_protocol(
"WAP Binary XML",
"WBXML",
"wbxml"
);
/* Required function calls to register the header fields
* and subtrees used */
proto_register_field_array(proto_wbxml, hf, array_length(hf));
proto_register_subtree_array(ett, array_length(ett));
/* Preferences */
wbxml_module = prefs_register_protocol(proto_wbxml, NULL);
prefs_register_bool_preference(wbxml_module,
"skip_wbxml_token_mapping",
"Skip the mapping of WBXML tokens to media type tokens.",
"Enable this preference if you want to view the WBXML "
"tokens without the representation in a media type "
"(e.g., WML). Tokens will show up as Tag_0x12, "
"attrStart_0x08 or attrValue_0x0B for example.",
&skip_wbxml_token_mapping);
prefs_register_bool_preference(wbxml_module,
"disable_wbxml_token_parsing",
"Disable the parsing of the WBXML tokens.",
"Enable this preference if you want to skip the "
"parsing of the WBXML tokens that constitute the body "
"of the WBXML document. Only the WBXML header will be "
"dissected (and visualized) then.",
&disable_wbxml_token_parsing);
register_dissector("wbxml", dissect_wbxml, proto_wbxml);
register_dissector("wbxml-uaprof", dissect_uaprof, proto_wbxml);
}
void
proto_reg_handoff_wbxml(void)
{
dissector_handle_t wbxml_handle;
/* Heuristic dissectors would be declared by means of:
* heur_dissector_add("wsp", dissect_wbxml_heur, proto_wbxml);
*/
wbxml_handle = find_dissector("wbxml");
/* Register the WSP content types (defined as protocol port)
* for WBXML dissection.
*
* See http://www.wapforum.org/wina/wsp-content-type.htm
*
* As the media types for WSP and HTTP are the same, the WSP dissector
* uses the same string dissector table as the HTTP protocol.
*/
/**** Well-known WBXML WSP Content-Type values ****/
dissector_add_string("media_type",
"application/vnd.wap.wmlc", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.wta-eventc", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.sic", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.slc", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.coc", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.connectivity-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wap.locc+wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.syncml+wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.syncml.dm+wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.oma.drm.rights+wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.wv.csp.wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.ms-sync.wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.ms-sync", wbxml_handle);
/**** Registered WBXML WSP Content-Type values ****/
dissector_add_string("media_type",
"application/vnd.uplanet.cacheop-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.uplanet.alert-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.uplanet.list-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.uplanet.listcmd-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.uplanet.channel-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.uplanet.bearer-choice-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.phonecom.mmc-wbxml", wbxml_handle);
dissector_add_string("media_type",
"application/vnd.nokia.syncset+wbxml", wbxml_handle);
/***** Content types that only have a textual representation *****/
dissector_add_string("media_type",
"application/x-wap-prov.browser-bookmarks", wbxml_handle);
dissector_add_string("media_type",
"application/x-wap-prov.browser-settings", wbxml_handle);
/* Same as application/vnd.nokia.syncset+wbxml */
dissector_add_string("media_type",
"application/x-prov.syncset+wbxml", wbxml_handle);
}
/*
* Editor modelines
*
* Local Variables:
* c-basic-offset: 8
* tab-width: 8
* indent-tabs-mode: t
* End:
*
* ex: set shiftwidth=8 tabstop=8 noexpandtab:
* :indentSize=8:tabSize=8:noTabs=false:
*/
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5116_0 |
crossvul-cpp_data_bad_4966_2 | /* scm.c - Socket level control messages processing.
*
* Author: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
* Alignment and value checking mods by Craig Metz
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/compat.h>
#include <net/scm.h>
#include <net/cls_cgroup.h>
/*
* Only allow a user to send credentials that they could set with
* setu(g)id.
*/
static __inline__ int scm_check_creds(struct ucred *creds)
{
const struct cred *cred = current_cred();
kuid_t uid = make_kuid(cred->user_ns, creds->uid);
kgid_t gid = make_kgid(cred->user_ns, creds->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) ||
ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || ns_capable(cred->user_ns, CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
gid_eq(gid, cred->sgid)) || ns_capable(cred->user_ns, CAP_SETGID))) {
return 0;
}
return -EPERM;
}
static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
{
int *fdp = (int*)CMSG_DATA(cmsg);
struct scm_fp_list *fpl = *fplp;
struct file **fpp;
int i, num;
num = (cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)))/sizeof(int);
if (num <= 0)
return 0;
if (num > SCM_MAX_FD)
return -EINVAL;
if (!fpl)
{
fpl = kmalloc(sizeof(struct scm_fp_list), GFP_KERNEL);
if (!fpl)
return -ENOMEM;
*fplp = fpl;
fpl->count = 0;
fpl->max = SCM_MAX_FD;
}
fpp = &fpl->fp[fpl->count];
if (fpl->count + num > fpl->max)
return -EINVAL;
/*
* Verify the descriptors and increment the usage count.
*/
for (i=0; i< num; i++)
{
int fd = fdp[i];
struct file *file;
if (fd < 0 || !(file = fget_raw(fd)))
return -EBADF;
*fpp++ = file;
fpl->count++;
}
return num;
}
void __scm_destroy(struct scm_cookie *scm)
{
struct scm_fp_list *fpl = scm->fp;
int i;
if (fpl) {
scm->fp = NULL;
for (i=fpl->count-1; i>=0; i--)
fput(fpl->fp[i]);
kfree(fpl);
}
}
EXPORT_SYMBOL(__scm_destroy);
int __scm_send(struct socket *sock, struct msghdr *msg, struct scm_cookie *p)
{
struct cmsghdr *cmsg;
int err;
for_each_cmsghdr(cmsg, msg) {
err = -EINVAL;
/* Verify that cmsg_len is at least sizeof(struct cmsghdr) */
/* The first check was omitted in <= 2.2.5. The reasoning was
that parser checks cmsg_len in any case, so that
additional check would be work duplication.
But if cmsg_level is not SOL_SOCKET, we do not check
for too short ancillary data object at all! Oops.
OK, let's add it...
*/
if (!CMSG_OK(msg, cmsg))
goto error;
if (cmsg->cmsg_level != SOL_SOCKET)
continue;
switch (cmsg->cmsg_type)
{
case SCM_RIGHTS:
if (!sock->ops || sock->ops->family != PF_UNIX)
goto error;
err=scm_fp_copy(cmsg, &p->fp);
if (err<0)
goto error;
break;
case SCM_CREDENTIALS:
{
struct ucred creds;
kuid_t uid;
kgid_t gid;
if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct ucred)))
goto error;
memcpy(&creds, CMSG_DATA(cmsg), sizeof(struct ucred));
err = scm_check_creds(&creds);
if (err)
goto error;
p->creds.pid = creds.pid;
if (!p->pid || pid_vnr(p->pid) != creds.pid) {
struct pid *pid;
err = -ESRCH;
pid = find_get_pid(creds.pid);
if (!pid)
goto error;
put_pid(p->pid);
p->pid = pid;
}
err = -EINVAL;
uid = make_kuid(current_user_ns(), creds.uid);
gid = make_kgid(current_user_ns(), creds.gid);
if (!uid_valid(uid) || !gid_valid(gid))
goto error;
p->creds.uid = uid;
p->creds.gid = gid;
break;
}
default:
goto error;
}
}
if (p->fp && !p->fp->count)
{
kfree(p->fp);
p->fp = NULL;
}
return 0;
error:
scm_destroy(p);
return err;
}
EXPORT_SYMBOL(__scm_send);
int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
{
struct cmsghdr __user *cm
= (__force struct cmsghdr __user *)msg->msg_control;
struct cmsghdr cmhdr;
int cmlen = CMSG_LEN(len);
int err;
if (MSG_CMSG_COMPAT & msg->msg_flags)
return put_cmsg_compat(msg, level, type, len, data);
if (cm==NULL || msg->msg_controllen < sizeof(*cm)) {
msg->msg_flags |= MSG_CTRUNC;
return 0; /* XXX: return error? check spec. */
}
if (msg->msg_controllen < cmlen) {
msg->msg_flags |= MSG_CTRUNC;
cmlen = msg->msg_controllen;
}
cmhdr.cmsg_level = level;
cmhdr.cmsg_type = type;
cmhdr.cmsg_len = cmlen;
err = -EFAULT;
if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
goto out;
if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
goto out;
cmlen = CMSG_SPACE(len);
if (msg->msg_controllen < cmlen)
cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
err = 0;
out:
return err;
}
EXPORT_SYMBOL(put_cmsg);
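/* Illustrative helper (a sketch, not used by this file): the control
* buffer space a sender must reserve to pass 'num' descriptors as
* SCM_RIGHTS. CMSG_LEN() is header plus payload and is what goes into
* cmsg_len above; CMSG_SPACE() additionally pads the payload to
* alignment and is what consumes msg_controllen.
*/
static inline size_t scm_rights_space_sketch(int num)
{
return CMSG_SPACE(num * sizeof(int));
}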
void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
{
struct cmsghdr __user *cm
= (__force struct cmsghdr __user*)msg->msg_control;
int fdmax = 0;
int fdnum = scm->fp->count;
struct file **fp = scm->fp->fp;
int __user *cmfptr;
int err = 0, i;
if (MSG_CMSG_COMPAT & msg->msg_flags) {
scm_detach_fds_compat(msg, scm);
return;
}
if (msg->msg_controllen > sizeof(struct cmsghdr))
fdmax = ((msg->msg_controllen - sizeof(struct cmsghdr))
/ sizeof(int));
if (fdnum < fdmax)
fdmax = fdnum;
for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
i++, cmfptr++)
{
struct socket *sock;
int new_fd;
err = security_file_receive(fp[i]);
if (err)
break;
err = get_unused_fd_flags(MSG_CMSG_CLOEXEC & msg->msg_flags
? O_CLOEXEC : 0);
if (err < 0)
break;
new_fd = err;
err = put_user(new_fd, cmfptr);
if (err) {
put_unused_fd(new_fd);
break;
}
/* Bump the usage count and install the file. */
sock = sock_from_file(fp[i], &err);
if (sock) {
sock_update_netprioidx(&sock->sk->sk_cgrp_data);
sock_update_classid(&sock->sk->sk_cgrp_data);
}
fd_install(new_fd, get_file(fp[i]));
}
if (i > 0)
{
int cmlen = CMSG_LEN(i*sizeof(int));
err = put_user(SOL_SOCKET, &cm->cmsg_level);
if (!err)
err = put_user(SCM_RIGHTS, &cm->cmsg_type);
if (!err)
err = put_user(cmlen, &cm->cmsg_len);
if (!err) {
cmlen = CMSG_SPACE(i*sizeof(int));
if (msg->msg_controllen < cmlen)
cmlen = msg->msg_controllen;
msg->msg_control += cmlen;
msg->msg_controllen -= cmlen;
}
}
if (i < fdnum || (fdnum && fdmax <= 0))
msg->msg_flags |= MSG_CTRUNC;
/*
* All of the files that fit in the message have had their
* usage counts incremented, so we just free the list.
*/
__scm_destroy(scm);
}
EXPORT_SYMBOL(scm_detach_fds);
struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
{
struct scm_fp_list *new_fpl;
int i;
if (!fpl)
return NULL;
new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
GFP_KERNEL);
if (new_fpl) {
for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
new_fpl->max = new_fpl->count;
}
return new_fpl;
}
EXPORT_SYMBOL(scm_fp_dup);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4966_2 |
crossvul-cpp_data_good_5594_1 | /*
* Kernel-based Virtual Machine driver for Linux
*
* derived from drivers/kvm/kvm_main.c
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright (C) 2008 Qumranet, Inc.
* Copyright IBM Corporation, 2008
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
* Amit Shah <amit.shah@qumranet.com>
* Ben-Ami Yassour <benami@il.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "i8254.h"
#include "tss.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/kvm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>
#include <linux/cpufreq.h>
#include <linux/user-return-notifier.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/timekeeper_internal.h>
#include <linux/pvclock_gtod.h>
#include <trace/events/kvm.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
#include <asm/debugreg.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <asm/mtrr.h>
#include <asm/mce.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h> /* Ugh! */
#include <asm/xcr.h>
#include <asm/pvclock.h>
#include <asm/div64.h>
#define MAX_IO_MSRS 256
#define KVM_MAX_MCE_BANKS 32
#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
#define emul_to_vcpu(ctxt) \
container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
/* EFER defaults:
* - enable syscall by default because it is emulated by KVM
* - enable LME and LMA by default on 64-bit KVM
*/
#ifdef CONFIG_X86_64
static
u64 __read_mostly efer_reserved_bits = ~((u64)(EFER_SCE | EFER_LME | EFER_LMA));
#else
static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
#endif
#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
static void update_cr8_intercept(struct kvm_vcpu *vcpu);
static void process_nmi(struct kvm_vcpu *vcpu);
struct kvm_x86_ops *kvm_x86_ops;
EXPORT_SYMBOL_GPL(kvm_x86_ops);
static bool ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
bool kvm_has_tsc_control;
EXPORT_SYMBOL_GPL(kvm_has_tsc_control);
u32 kvm_max_guest_tsc_khz;
EXPORT_SYMBOL_GPL(kvm_max_guest_tsc_khz);
/* tsc tolerance in parts per million - default to 1/2 of the NTP threshold */
static u32 tsc_tolerance_ppm = 250;
module_param(tsc_tolerance_ppm, uint, S_IRUGO | S_IWUSR);
#define KVM_NR_SHARED_MSRS 16
struct kvm_shared_msrs_global {
int nr;
u32 msrs[KVM_NR_SHARED_MSRS];
};
struct kvm_shared_msrs {
struct user_return_notifier urn;
bool registered;
struct kvm_shared_msr_values {
u64 host;
u64 curr;
} values[KVM_NR_SHARED_MSRS];
};
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs;
struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "pf_fixed", VCPU_STAT(pf_fixed) },
{ "pf_guest", VCPU_STAT(pf_guest) },
{ "tlb_flush", VCPU_STAT(tlb_flush) },
{ "invlpg", VCPU_STAT(invlpg) },
{ "exits", VCPU_STAT(exits) },
{ "io_exits", VCPU_STAT(io_exits) },
{ "mmio_exits", VCPU_STAT(mmio_exits) },
{ "signal_exits", VCPU_STAT(signal_exits) },
{ "irq_window", VCPU_STAT(irq_window_exits) },
{ "nmi_window", VCPU_STAT(nmi_window_exits) },
{ "halt_exits", VCPU_STAT(halt_exits) },
{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
{ "hypercalls", VCPU_STAT(hypercalls) },
{ "request_irq", VCPU_STAT(request_irq_exits) },
{ "irq_exits", VCPU_STAT(irq_exits) },
{ "host_state_reload", VCPU_STAT(host_state_reload) },
{ "efer_reload", VCPU_STAT(efer_reload) },
{ "fpu_reload", VCPU_STAT(fpu_reload) },
{ "insn_emulation", VCPU_STAT(insn_emulation) },
{ "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
{ "irq_injections", VCPU_STAT(irq_injections) },
{ "nmi_injections", VCPU_STAT(nmi_injections) },
{ "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) },
{ "mmu_pte_write", VM_STAT(mmu_pte_write) },
{ "mmu_pte_updated", VM_STAT(mmu_pte_updated) },
{ "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) },
{ "mmu_flooded", VM_STAT(mmu_flooded) },
{ "mmu_recycled", VM_STAT(mmu_recycled) },
{ "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
{ "mmu_unsync", VM_STAT(mmu_unsync) },
{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
{ "largepages", VM_STAT(lpages) },
{ NULL }
};
u64 __read_mostly host_xcr0;
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
{
int i;
for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU); i++)
vcpu->arch.apf.gfns[i] = ~0;
}
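/*
* Explanatory note on the machinery below: some MSRs are cheap to
* leave loaded with guest values across VM exits. The "shared MSR"
* code caches the host values and restores them lazily, from a
* user-return notifier, only when the CPU actually returns to
* userspace; kvm_on_user_return() is that notifier.
*/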
static void kvm_on_user_return(struct user_return_notifier *urn)
{
unsigned slot;
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
wrmsrl(shared_msrs_global.msrs[slot], values->host);
values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
{
u64 value;
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
/* only read here, and nobody should modify it at this time,
* so no lock is needed */
if (slot >= shared_msrs_global.nr) {
printk(KERN_ERR "kvm: invalid MSR slot!");
return;
}
rdmsrl_safe(msr, &value);
smsr->values[slot].host = value;
smsr->values[slot].curr = value;
}
void kvm_define_shared_msr(unsigned slot, u32 msr)
{
if (slot >= shared_msrs_global.nr)
shared_msrs_global.nr = slot + 1;
shared_msrs_global.msrs[slot] = msr;
/* make sure shared_msrs_global has been updated before it is read */
smp_wmb();
}
EXPORT_SYMBOL_GPL(kvm_define_shared_msr);
static void kvm_shared_msr_cpu_online(void)
{
unsigned i;
for (i = 0; i < shared_msrs_global.nr; ++i)
shared_msr_update(i, shared_msrs_global.msrs[i]);
}
void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
{
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (((value ^ smsr->values[slot].curr) & mask) == 0)
return;
smsr->values[slot].curr = value;
wrmsrl(shared_msrs_global.msrs[slot], value);
if (!smsr->registered) {
smsr->urn.on_user_return = kvm_on_user_return;
user_return_notifier_register(&smsr->urn);
smsr->registered = true;
}
}
EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
static void drop_user_return_notifiers(void *ignore)
{
unsigned int cpu = smp_processor_id();
struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
if (smsr->registered)
kvm_on_user_return(&smsr->urn);
}
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu)
{
return vcpu->arch.apic_base;
}
EXPORT_SYMBOL_GPL(kvm_get_apic_base);
void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
{
/* TODO: reserve bits check */
kvm_lapic_set_base(vcpu, data);
}
EXPORT_SYMBOL_GPL(kvm_set_apic_base);
#define EXCPT_BENIGN 0
#define EXCPT_CONTRIBUTORY 1
#define EXCPT_PF 2
static int exception_class(int vector)
{
switch (vector) {
case PF_VECTOR:
return EXCPT_PF;
case DE_VECTOR:
case TS_VECTOR:
case NP_VECTOR:
case SS_VECTOR:
case GP_VECTOR:
return EXCPT_CONTRIBUTORY;
default:
break;
}
return EXCPT_BENIGN;
}
static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
unsigned nr, bool has_error, u32 error_code,
bool reinject)
{
u32 prev_nr;
int class1, class2;
kvm_make_request(KVM_REQ_EVENT, vcpu);
if (!vcpu->arch.exception.pending) {
queue:
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = has_error;
vcpu->arch.exception.nr = nr;
vcpu->arch.exception.error_code = error_code;
vcpu->arch.exception.reinject = reinject;
return;
}
/* to check exception */
prev_nr = vcpu->arch.exception.nr;
if (prev_nr == DF_VECTOR) {
/* triple fault -> shutdown */
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
class1 = exception_class(prev_nr);
class2 = exception_class(nr);
if ((class1 == EXCPT_CONTRIBUTORY && class2 == EXCPT_CONTRIBUTORY)
|| (class1 == EXCPT_PF && class2 != EXCPT_BENIGN)) {
/* generate double fault per SDM Table 5-5 */
vcpu->arch.exception.pending = true;
vcpu->arch.exception.has_error_code = true;
vcpu->arch.exception.nr = DF_VECTOR;
vcpu->arch.exception.error_code = 0;
} else
/* replace the previous exception with the new one in the hope
that instruction re-execution will regenerate the lost
exception */
goto queue;
}
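/*
* Explanatory note on the merge rules implemented above (following
* Intel SDM Table 5-5):
*
* first \ second | benign | contributory | page fault
* ---------------+--------+--------------+-----------
* benign | second | second | second
* contributory | second | #DF | second
* page fault | second | #DF | #DF
*
* and a pending #DF followed by any further exception escalates to
* a triple fault (VM shutdown), as handled above.
*/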
void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
kvm_multiple_exception(vcpu, nr, false, 0, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception);
void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
kvm_multiple_exception(vcpu, nr, false, 0, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception);
void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err)
{
if (err)
kvm_inject_gp(vcpu, 0);
else
kvm_x86_ops->skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_complete_insn_gp);
void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
++vcpu->stat.pf_guest;
vcpu->arch.cr2 = fault->address;
kvm_queue_exception_e(vcpu, PF_VECTOR, fault->error_code);
}
EXPORT_SYMBOL_GPL(kvm_inject_page_fault);
void kvm_propagate_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault)
{
if (mmu_is_nested(vcpu) && !fault->nested_page_fault)
vcpu->arch.nested_mmu.inject_page_fault(vcpu, fault);
else
vcpu->arch.mmu.inject_page_fault(vcpu, fault);
}
void kvm_inject_nmi(struct kvm_vcpu *vcpu)
{
atomic_inc(&vcpu->arch.nmi_queued);
kvm_make_request(KVM_REQ_NMI, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_inject_nmi);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, false);
}
EXPORT_SYMBOL_GPL(kvm_queue_exception_e);
void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code)
{
kvm_multiple_exception(vcpu, nr, true, error_code, true);
}
EXPORT_SYMBOL_GPL(kvm_requeue_exception_e);
/*
* Checks if cpl <= required_cpl; if true, return true. Otherwise queue
* a #GP and return false.
*/
bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
return true;
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return false;
}
EXPORT_SYMBOL_GPL(kvm_require_cpl);
/*
* This function will be used to read from the physical memory of the currently
* running guest. The difference from kvm_read_guest_page() is that this function
* can read from guest physical or from the guest's guest physical memory.
*/
int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
gfn_t ngfn, void *data, int offset, int len,
u32 access)
{
gfn_t real_gfn;
gpa_t ngpa;
ngpa = gfn_to_gpa(ngfn);
real_gfn = mmu->translate_gpa(vcpu, ngpa, access);
if (real_gfn == UNMAPPED_GVA)
return -EFAULT;
real_gfn = gpa_to_gfn(real_gfn);
return kvm_read_guest_page(vcpu->kvm, real_gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page_mmu);
int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
void *data, int offset, int len, u32 access)
{
return kvm_read_guest_page_mmu(vcpu, vcpu->arch.walk_mmu, gfn,
data, offset, len, access);
}
/*
* Load the PAE pdptrs. Return true if they are all valid.
*/
int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
{
gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT;
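/*
* A PAE PDPT holds four 8-byte entries on a 32-byte boundary, so
* bits 11:5 of CR3 select the 32-byte slot within the page and the
* "<< 2" below turns that slot number into a u64 index.
*/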
unsigned offset = ((cr3 & (PAGE_SIZE-1)) >> 5) << 2;
int i;
int ret;
u64 pdpte[ARRAY_SIZE(mmu->pdptrs)];
ret = kvm_read_guest_page_mmu(vcpu, mmu, pdpt_gfn, pdpte,
offset * sizeof(u64), sizeof(pdpte),
PFERR_USER_MASK|PFERR_WRITE_MASK);
if (ret < 0) {
ret = 0;
goto out;
}
for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
if (is_present_gpte(pdpte[i]) &&
(pdpte[i] & vcpu->arch.mmu.rsvd_bits_mask[0][2])) {
ret = 0;
goto out;
}
}
ret = 1;
memcpy(mmu->pdptrs, pdpte, sizeof(mmu->pdptrs));
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail);
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty);
out:
return ret;
}
EXPORT_SYMBOL_GPL(load_pdptrs);
static bool pdptrs_changed(struct kvm_vcpu *vcpu)
{
u64 pdpte[ARRAY_SIZE(vcpu->arch.walk_mmu->pdptrs)];
bool changed = true;
int offset;
gfn_t gfn;
int r;
if (is_long_mode(vcpu) || !is_pae(vcpu))
return false;
if (!test_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail))
return true;
gfn = (kvm_read_cr3(vcpu) & ~31u) >> PAGE_SHIFT;
offset = (kvm_read_cr3(vcpu) & ~31u) & (PAGE_SIZE - 1);
r = kvm_read_nested_guest_page(vcpu, gfn, pdpte, offset, sizeof(pdpte),
PFERR_USER_MASK | PFERR_WRITE_MASK);
if (r < 0)
goto out;
changed = memcmp(pdpte, vcpu->arch.walk_mmu->pdptrs, sizeof(pdpte)) != 0;
out:
return changed;
}
int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
unsigned long old_cr0 = kvm_read_cr0(vcpu);
unsigned long update_bits = X86_CR0_PG | X86_CR0_WP |
X86_CR0_CD | X86_CR0_NW;
cr0 |= X86_CR0_ET;
#ifdef CONFIG_X86_64
if (cr0 & 0xffffffff00000000UL)
return 1;
#endif
cr0 &= ~CR0_RESERVED_BITS;
if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD))
return 1;
if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE))
return 1;
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
#ifdef CONFIG_X86_64
if ((vcpu->arch.efer & EFER_LME)) {
int cs_db, cs_l;
if (!is_pae(vcpu))
return 1;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
if (cs_l)
return 1;
} else
#endif
if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
}
if (!(cr0 & X86_CR0_PG) && kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE))
return 1;
kvm_x86_ops->set_cr0(vcpu, cr0);
if ((cr0 ^ old_cr0) & X86_CR0_PG) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
}
if ((cr0 ^ old_cr0) & update_bits)
kvm_mmu_reset_context(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr0);
void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
{
(void)kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f));
}
EXPORT_SYMBOL_GPL(kvm_lmsw);
int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
u64 xcr0;
/* Only support XCR_XFEATURE_ENABLED_MASK(xcr0) now */
if (index != XCR_XFEATURE_ENABLED_MASK)
return 1;
xcr0 = xcr;
if (kvm_x86_ops->get_cpl(vcpu) != 0)
return 1;
if (!(xcr0 & XSTATE_FP))
return 1;
if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
return 1;
if (xcr0 & ~host_xcr0)
return 1;
vcpu->arch.xcr0 = xcr0;
vcpu->guest_xcr0_loaded = 0;
return 0;
}
int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
if (__kvm_set_xcr(vcpu, index, xcr)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_xcr);
int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
return 1;
if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
return 1;
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
if (!guest_cpuid_has_pcid(vcpu))
return 1;
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
return 1;
}
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
if (((cr4 ^ old_cr4) & pdptr_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr4);
int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_mmu_flush_tlb(vcpu);
return 0;
}
if (is_long_mode(vcpu)) {
if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
if (cr3 & CR3_L_MODE_RESERVED_BITS)
return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
return 1;
if (is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
}
/*
* We don't check reserved bits in nonpae mode, because
* this isn't enforced, and VMware depends on this.
*/
}
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
* cause trouble later on when we turn on paging anyway.)
*
* A real CPU would silently accept an invalid cr3 and would
* attempt to use it - with largely undefined (and often hard
* to debug) behavior on the guest side.
*/
if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
return 1;
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu->arch.mmu.new_cr3(vcpu);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr3);
int kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
{
if (cr8 & CR8_RESERVED_BITS)
return 1;
if (irqchip_in_kernel(vcpu->kvm))
kvm_lapic_set_tpr(vcpu, cr8);
else
vcpu->arch.cr8 = cr8;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_cr8);
unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
{
if (irqchip_in_kernel(vcpu->kvm))
return kvm_lapic_get_cr8(vcpu);
else
return vcpu->arch.cr8;
}
EXPORT_SYMBOL_GPL(kvm_get_cr8);
static void kvm_update_dr7(struct kvm_vcpu *vcpu)
{
unsigned long dr7;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
dr7 = vcpu->arch.guest_debug_dr7;
else
dr7 = vcpu->arch.dr7;
kvm_x86_ops->set_dr7(vcpu, dr7);
vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
}
static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
switch (dr) {
case 0 ... 3:
vcpu->arch.db[dr] = val;
if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
vcpu->arch.eff_db[dr] = val;
break;
case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */
case 6:
if (val & 0xffffffff00000000ULL)
return -1; /* #GP */
vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1; /* #UD */
/* fall through */
default: /* 7 */
if (val & 0xffffffff00000000ULL)
return -1; /* #GP */
vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
kvm_update_dr7(vcpu);
break;
}
return 0;
}
int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
{
int res;
res = __kvm_set_dr(vcpu, dr, val);
if (res > 0)
kvm_queue_exception(vcpu, UD_VECTOR);
else if (res < 0)
kvm_inject_gp(vcpu, 0);
return res;
}
EXPORT_SYMBOL_GPL(kvm_set_dr);
static int _kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
switch (dr) {
case 0 ... 3:
*val = vcpu->arch.db[dr];
break;
case 4:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */
case 6:
*val = vcpu->arch.dr6;
break;
case 5:
if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
return 1;
/* fall through */
default: /* 7 */
*val = vcpu->arch.dr7;
break;
}
return 0;
}
int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val)
{
if (_kvm_get_dr(vcpu, dr, val)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_dr);
bool kvm_rdpmc(struct kvm_vcpu *vcpu)
{
u32 ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
u64 data;
int err;
err = kvm_pmu_read_pmc(vcpu, ecx, &data);
if (err)
return err;
kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
return err;
}
EXPORT_SYMBOL_GPL(kvm_rdpmc);
/*
* List of msr numbers which we expose to userspace through KVM_GET_MSRS
* and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
*
* This list is modified at module load time to reflect the
* capabilities of the host cpu. The capability test skips MSRs that are
* kvm-specific. Those are put at the beginning of the list.
*/
#define KVM_SAVE_MSRS_BEGIN 10
static u32 msrs_to_save[] = {
MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
HV_X64_MSR_APIC_ASSIST_PAGE, MSR_KVM_ASYNC_PF_EN, MSR_KVM_STEAL_TIME,
MSR_KVM_PV_EOI_EN,
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
MSR_STAR,
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
};
static unsigned num_msrs_to_save;
static const u32 emulated_msrs[] = {
MSR_IA32_TSC_ADJUST,
MSR_IA32_TSCDEADLINE,
MSR_IA32_MISC_ENABLE,
MSR_IA32_MCG_STATUS,
MSR_IA32_MCG_CTL,
};
static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
void kvm_enable_efer_bits(u64 mask)
{
efer_reserved_bits &= ~mask;
}
EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
/*
* Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
return kvm_x86_ops->set_msr(vcpu, msr);
}
/*
* Adapt set_msr() to msr_io()'s calling convention
*/
static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
{
struct msr_data msr;
msr.data = *data;
msr.index = index;
msr.host_initiated = true;
return kvm_set_msr(vcpu, &msr);
}
#ifdef CONFIG_X86_64
struct pvclock_gtod_data {
seqcount_t seq;
struct { /* extract of a clocksource struct */
int vclock_mode;
cycle_t cycle_last;
cycle_t mask;
u32 mult;
u32 shift;
} clock;
/* open coded 'struct timespec' */
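/* monotonic_time_snsec holds "shifted nanoseconds", i.e. ns << shift,
* kept below NSEC_PER_SEC << shift by update_pvclock_gtod() below. */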
u64 monotonic_time_snsec;
time_t monotonic_time_sec;
};
static struct pvclock_gtod_data pvclock_gtod_data;
static void update_pvclock_gtod(struct timekeeper *tk)
{
struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
write_seqcount_begin(&vdata->seq);
/* copy pvclock gtod data */
vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
vdata->clock.cycle_last = tk->clock->cycle_last;
vdata->clock.mask = tk->clock->mask;
vdata->clock.mult = tk->mult;
vdata->clock.shift = tk->shift;
vdata->monotonic_time_sec = tk->xtime_sec
+ tk->wall_to_monotonic.tv_sec;
vdata->monotonic_time_snsec = tk->xtime_nsec
+ (tk->wall_to_monotonic.tv_nsec
<< tk->shift);
while (vdata->monotonic_time_snsec >=
(((u64)NSEC_PER_SEC) << tk->shift)) {
vdata->monotonic_time_snsec -=
((u64)NSEC_PER_SEC) << tk->shift;
vdata->monotonic_time_sec++;
}
write_seqcount_end(&vdata->seq);
}
#endif
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
{
int version;
int r;
struct pvclock_wall_clock wc;
struct timespec boot;
if (!wall_clock)
return;
r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version));
if (r)
return;
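/*
* The wall clock area uses a seqlock-like protocol: guests retry
* while the version is odd or changes across their reads, so force
* the version odd before updating the payload and back to an even
* value once the update is complete.
*/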
if (version & 1)
++version; /* first time write, random junk */
++version;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
/*
* The guest calculates current wall clock time by adding
* system time (updated by kvm_guest_time_update below) to the
* wall clock specified here. Guest system time equals host
* system time for us, thus we must fill in the host boot time here.
*/
getboottime(&boot);
if (kvm->arch.kvmclock_offset) {
struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset);
boot = timespec_sub(boot, ts);
}
wc.sec = boot.tv_sec;
wc.nsec = boot.tv_nsec;
wc.version = version;
kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
version++;
kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
}
static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
{
uint32_t quotient, remainder;
/* Don't try to replace with do_div(), this one calculates
* "(dividend << 32) / divisor" */
__asm__ ( "divl %4"
: "=a" (quotient), "=d" (remainder)
: "0" (0), "1" (dividend), "r" (divisor) );
return quotient;
}
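/*
* Computes a fixed point multiplier and shift such that, following the
* pvclock_scale_delta() convention, "(ticks << shift) * mult >> 32"
* (with a right shift when shift is negative) converts a tick count at
* base_khz into one at scaled_khz. Worked example: base_khz == 2000000
* (a 2GHz counter) and scaled_khz == 1000000 leave shift == 0 and
* mult == 0x80000000, i.e. each base tick is worth half a scaled tick.
*/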
static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
s8 *pshift, u32 *pmultiplier)
{
uint64_t scaled64;
int32_t shift = 0;
uint64_t tps64;
uint32_t tps32;
tps64 = base_khz * 1000LL;
scaled64 = scaled_khz * 1000LL;
while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
tps64 >>= 1;
shift--;
}
tps32 = (uint32_t)tps64;
while (tps32 <= scaled64 || scaled64 & 0xffffffff00000000ULL) {
if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
scaled64 >>= 1;
else
tps32 <<= 1;
shift++;
}
*pshift = shift;
*pmultiplier = div_frac(scaled64, tps32);
pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
__func__, base_khz, scaled_khz, shift, *pmultiplier);
}
static inline u64 get_kernel_ns(void)
{
struct timespec ts;
WARN_ON(preemptible());
ktime_get_ts(&ts);
monotonic_to_bootbased(&ts);
return timespec_to_ns(&ts);
}
#ifdef CONFIG_X86_64
static atomic_t kvm_guest_has_master_clock = ATOMIC_INIT(0);
#endif
static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
unsigned long max_tsc_khz;
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
{
return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult,
vcpu->arch.virtual_tsc_shift);
}
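/* Scale khz by (1 + ppm/1e6): e.g. adjust_tsc_khz(1000000, 250)
* returns 1000250, and a negative ppm shrinks the rate accordingly. */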
static u32 adjust_tsc_khz(u32 khz, s32 ppm)
{
u64 v = (u64)khz * (1000000 + ppm);
do_div(v, 1000000);
return v;
}
static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
u32 thresh_lo, thresh_hi;
int use_scaling = 0;
/* Compute a scale to convert nanoseconds in TSC cycles */
kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
&vcpu->arch.virtual_tsc_shift,
&vcpu->arch.virtual_tsc_mult);
vcpu->arch.virtual_tsc_khz = this_tsc_khz;
/*
* Compute the acceptable variation in TSC rate and decide
* whether the requested rate falls within those bounds of
* the hardware rate. If so, no scaling or compensation
* need be done.
*/
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
use_scaling = 1;
}
kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
{
u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
vcpu->arch.virtual_tsc_mult,
vcpu->arch.virtual_tsc_shift);
tsc += vcpu->arch.this_tsc_write;
return tsc;
}
void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
bool vcpus_matched;
bool do_request = false;
struct kvm_arch *ka = &vcpu->kvm->arch;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&vcpu->kvm->online_vcpus));
if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC)
if (!ka->use_master_clock)
do_request = true;
if (!vcpus_matched && ka->use_master_clock)
do_request = true;
if (do_request)
kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc,
atomic_read(&vcpu->kvm->online_vcpus),
ka->use_master_clock, gtod->clock.vclock_mode);
#endif
}
static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
{
u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
}
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
s64 usdiff;
bool matched;
u64 data = msr->data;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
/* n.b - signed multiplication and division required */
usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
/* do_div() only does unsigned */
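/* idivl divides the 64-bit edx:eax pair by its operand, leaving the
* quotient in eax and the remainder in edx; the xor then clears edx
* so the "=A" (edx:eax) output carries just the quotient. */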
asm("idivl %2; xor %%edx, %%edx"
: "=A"(usdiff)
: "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
#endif
do_div(elapsed, 1000);
usdiff -= elapsed;
if (usdiff < 0)
usdiff = -usdiff;
/*
* Special case: a TSC write with a small delta (under 1 second) of virtual
* cycle time against real time is interpreted as an attempt to
* synchronize the CPU.
*
* For a reliable TSC, we can match TSC offsets, and for an unstable
* TSC, we add elapsed time in this computation. We could let the
* compensation code attempt to catch up if we fall behind, but
* it's better to try to match offsets from the beginning.
*/
if (usdiff < USEC_PER_SEC &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
if (!check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
matched = true;
} else {
/*
* We split periods of matched TSC writes into generations.
* For each generation, we track the original measured
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
* exact software computation in compute_guest_tsc()
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
kvm->arch.cur_tsc_generation++;
kvm->arch.cur_tsc_nsec = ns;
kvm->arch.cur_tsc_write = data;
kvm->arch.cur_tsc_offset = offset;
matched = false;
pr_debug("kvm: new tsc generation %u, clock %llu\n",
kvm->arch.cur_tsc_generation, data);
}
/*
* We also track the most recently recorded kHz, write and time to
* allow the matching interval to be extended at each write.
*/
kvm->arch.last_tsc_nsec = ns;
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
vcpu->arch.last_guest_tsc = data;
/* Keep track of which generation this VCPU has synchronized to */
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
if (matched)
kvm->arch.nr_vcpus_matched_tsc++;
else
kvm->arch.nr_vcpus_matched_tsc = 0;
kvm_track_tsc_matching(vcpu);
spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
EXPORT_SYMBOL_GPL(kvm_write_tsc);
#ifdef CONFIG_X86_64
static cycle_t read_tsc(void)
{
cycle_t ret;
u64 last;
/*
* Empirically, a fence (of type that depends on the CPU)
* before rdtsc is enough to ensure that rdtsc is ordered
* with respect to loads. The various CPU manuals are unclear
* as to whether rdtsc can be reordered with later loads,
* but no one has ever seen it happen.
*/
rdtsc_barrier();
ret = (cycle_t)vget_cycles();
last = pvclock_gtod_data.clock.cycle_last;
if (likely(ret >= last))
return ret;
/*
* GCC likes to generate cmov here, but this branch is extremely
* predictable (it's just a function of time and the likely is
* very likely) and there's a data dependence, so force GCC
* to generate a branch instead. I don't barrier() because
* we don't actually need a barrier, and if this function
* ever gets inlined it will generate worse code.
*/
asm volatile ("");
return last;
}
static inline u64 vgettsc(cycle_t *cycle_now)
{
long v;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
*cycle_now = read_tsc();
v = (*cycle_now - gtod->clock.cycle_last) & gtod->clock.mask;
return v * gtod->clock.mult;
}
static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
{
unsigned long seq;
u64 ns;
int mode;
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
ts->tv_nsec = 0;
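/* Standard seqcount read loop: retry until update_pvclock_gtod()
* has published a consistent snapshot. */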
do {
seq = read_seqcount_begin(&gtod->seq);
mode = gtod->clock.vclock_mode;
ts->tv_sec = gtod->monotonic_time_sec;
ns = gtod->monotonic_time_snsec;
ns += vgettsc(cycle_now);
ns >>= gtod->clock.shift;
} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
timespec_add_ns(ts, ns);
return mode;
}
/* returns true if host is using tsc clocksource */
static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
{
struct timespec ts;
/* checked again under seqlock below */
if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
return false;
if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
return false;
monotonic_to_bootbased(&ts);
*kernel_ns = timespec_to_ns(&ts);
return true;
}
#endif
/*
*
* Assuming a stable TSC across physical CPUs, and a stable TSC
* across virtual CPUs, the following condition is possible.
* Each numbered line represents an event visible to both
* CPUs at the next numbered event.
*
* "timespecX" represents host monotonic time. "tscX" represents
* RDTSC value.
*
* VCPU0 on CPU0 | VCPU1 on CPU1
*
* 1. read timespec0,tsc0
* 2. | timespec1 = timespec0 + N
* | tsc1 = tsc0 + M
* 3. transition to guest | transition to guest
* 4. ret0 = timespec0 + (rdtsc - tsc0) |
* 5. | ret1 = timespec1 + (rdtsc - tsc1)
* | ret1 = timespec0 + N + (rdtsc - (tsc0 + M))
*
* Since ret0 update is visible to VCPU1 at time 5, to obey monotonicity:
*
* - ret0 < ret1
* - timespec0 + (rdtsc - tsc0) < timespec0 + N + (rdtsc - (tsc0 + M))
* ...
* - 0 < N - M => M < N
*
* That is, when timespec0 != timespec1, M < N. Unfortunately that is not
* always the case (the difference between two distinct xtime instances
* might be smaller than the difference between corresponding TSC reads,
* when updating guest vcpus pvclock areas).
*
* To avoid that problem, do not allow visibility of distinct
* system_timestamp/tsc_timestamp values simultaneously: use a master
* copy of host monotonic time values. Update that master copy
* in lockstep.
*
* Rely on synchronization of host TSCs and guest TSCs for monotonicity.
*
*/
static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
struct kvm_arch *ka = &kvm->arch;
int vclock_mode;
bool host_tsc_clocksource, vcpus_matched;
vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 ==
atomic_read(&kvm->online_vcpus));
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
host_tsc_clocksource = kvm_get_time_and_clockread(
&ka->master_kernel_ns,
&ka->master_cycle_now);
ka->use_master_clock = host_tsc_clocksource && vcpus_matched;
if (ka->use_master_clock)
atomic_set(&kvm_guest_has_master_clock, 1);
vclock_mode = pvclock_gtod_data.clock.vclock_mode;
trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode,
vcpus_matched);
#endif
}
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->pv_time_enabled)
return 0;
/*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
* possible measurements of the TSC correspond to one measurement of any
* other clock, and so a spread of values is possible. This is not a
* problem for the computation of the nanosecond clock; with TSC rates
* around 1GHz, there can only be a few cycles which correspond to one
* nanosecond value, and any path through this code will inevitably
* take longer than that. However, with the kernel_ns value itself,
* the precision may be much lower, down to HZ granularity. If the
* first sampling of TSC against kernel_ns ends in the low part of the
* range, and the second in the high end of the range, we can get:
*
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
*
* As the sampling errors potentially range in the thousands of cycles,
* it is possible such a time value has already been observed by the
* guest. To protect against this, we must compute the system time as
* observed by the guest and ensure the new system time is greater.
*/
max_kernel_ns = 0;
if (vcpu->hv_clock.tsc_timestamp) {
max_kernel_ns = vcpu->last_guest_tsc -
vcpu->hv_clock.tsc_timestamp;
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
vcpu->hv_clock.tsc_to_system_mul,
vcpu->hv_clock.tsc_shift);
max_kernel_ns += vcpu->last_kernel_ns;
}
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/*
* With a master <monotonic time, tsc value> tuple, pvclock clock
* reads always increase at the (scaled) rate of the guest TSC -
* no need to deal with sampling errors.
*/
if (!use_master_clock) {
if (max_kernel_ns > kernel_ns)
kernel_ns = max_kernel_ns;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
return 0;
}
static bool msr_mtrr_valid(unsigned msr)
{
switch (msr) {
case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
case MSR_MTRRfix64K_00000:
case MSR_MTRRfix16K_80000:
case MSR_MTRRfix16K_A0000:
case MSR_MTRRfix4K_C0000:
case MSR_MTRRfix4K_C8000:
case MSR_MTRRfix4K_D0000:
case MSR_MTRRfix4K_D8000:
case MSR_MTRRfix4K_E0000:
case MSR_MTRRfix4K_E8000:
case MSR_MTRRfix4K_F0000:
case MSR_MTRRfix4K_F8000:
case MSR_MTRRdefType:
case MSR_IA32_CR_PAT:
return true;
case 0x2f8:
return true;
}
return false;
}
static bool valid_pat_type(unsigned t)
{
return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}
static bool valid_mtrr_type(unsigned t)
{
return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}
static bool mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
int i;
if (!msr_mtrr_valid(msr))
return false;
if (msr == MSR_IA32_CR_PAT) {
for (i = 0; i < 8; i++)
if (!valid_pat_type((data >> (i * 8)) & 0xff))
return false;
return true;
} else if (msr == MSR_MTRRdefType) {
if (data & ~0xcff)
return false;
return valid_mtrr_type(data & 0xff);
} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
for (i = 0; i < 8 ; i++)
if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
return false;
return true;
}
/* variable MTRRs */
return valid_mtrr_type(data & 0xff);
}
static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
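/* Variable-range MTRRs live in base/mask pairs starting at MSR
* 0x200: even offsets are IA32_MTRR_PHYSBASEn, odd offsets are
* IA32_MTRR_PHYSMASKn. */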
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
switch (msr) {
case MSR_IA32_MCG_STATUS:
vcpu->arch.mcg_status = data;
break;
case MSR_IA32_MCG_CTL:
if (!(mcg_cap & MCG_CTL_P))
return 1;
if (data != 0 && data != ~(u64)0)
return -1;
vcpu->arch.mcg_ctl = data;
break;
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
/* Only 0 or all 1s can be written to IA32_MCi_CTL.
* Some Linux kernels, though, clear bit 10 in bank 4 to
* work around a BIOS/GART TBL issue on AMD K8s; ignore
* this to avoid an uncaught #GP in the guest.
*/
if ((offset & 0x3) == 0 &&
data != 0 && (data | (1 << 10)) != ~(u64)0)
return -1;
vcpu->arch.mce_banks[offset] = data;
break;
}
return 1;
}
return 0;
}
static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
u64 page_addr = data & PAGE_MASK;
u8 *page;
int r;
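/* The written value packs the destination gpa in its page-aligned
* upper bits and the blob page index in the low bits; copy that page
* of the userspace-provided blob into the guest. */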
r = -E2BIG;
if (page_num >= blob_size)
goto out;
r = -ENOMEM;
page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
if (IS_ERR(page)) {
r = PTR_ERR(page);
goto out;
}
if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
goto out_free;
r = 0;
out_free:
kfree(page);
out:
return r;
}
static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
{
return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
static bool kvm_hv_msr_partition_wide(u32 msr)
{
bool r = false;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
case HV_X64_MSR_HYPERCALL:
r = true;
break;
}
return r;
}
static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if the guest os id is not set, the hypercall page should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
}
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
switch (msr) {
case HV_X64_MSR_APIC_ASSIST_PAGE: {
unsigned long addr;
if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
vcpu->arch.hv_vapic = data;
break;
}
addr = gfn_to_hva(vcpu->kvm, data >>
HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
if (kvm_is_error_hva(addr))
return 1;
if (__clear_user((void __user *)addr, PAGE_SIZE))
return 1;
vcpu->arch.hv_vapic = data;
break;
}
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
case HV_X64_MSR_ICR:
return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
default:
vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
/* Bits 2:5 are reserved; they should be zero */
if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
if (!(data & KVM_ASYNC_PF_ENABLED)) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 0;
}
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
{
u64 delta;
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
delta = current->sched_info.run_delay - vcpu->arch.st.last_steal;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
vcpu->arch.st.accum_steal = delta;
}
static void record_steal_time(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
return;
if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
return;
vcpu->arch.st.steal.steal += vcpu->arch.st.accum_steal;
vcpu->arch.st.steal.version += 2;
vcpu->arch.st.accum_steal = 0;
kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
&vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
}
int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
bool pr = false;
u32 msr = msr_info->index;
u64 data = msr_info->data;
switch (msr) {
case MSR_AMD64_NB_CFG:
case MSR_IA32_UCODE_REV:
case MSR_IA32_UCODE_WRITE:
case MSR_VM_HSAVE_PA:
case MSR_AMD64_PATCH_LOADER:
case MSR_AMD64_BU_CFG2:
break;
case MSR_EFER:
return set_efer(vcpu, data);
case MSR_K7_HWCR:
data &= ~(u64)0x40; /* ignore flush filter disable */
data &= ~(u64)0x100; /* ignore ignne emulation enable */
data &= ~(u64)0x8; /* ignore TLB cache disable */
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
data);
return 1;
}
break;
case MSR_FAM10H_MMIO_CONF_BASE:
if (data != 0) {
vcpu_unimpl(vcpu, "unimplemented MMIO_CONF_BASE wrmsr: "
"0x%llx\n", data);
return 1;
}
break;
case MSR_IA32_DEBUGCTLMSR:
if (!data) {
/* We support the non-activated case already */
break;
} else if (data & ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_BTF)) {
/* Values other than LBR and BTF are vendor-specific,
* thus reserved and should throw a #GP */
return 1;
}
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
__func__, data);
break;
case 0x200 ... 0x2ff:
return set_msr_mtrr(vcpu, msr, data);
case MSR_IA32_APICBASE:
kvm_set_apic_base(vcpu, data);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_write(vcpu, msr, data);
case MSR_IA32_TSCDEADLINE:
kvm_set_lapic_tscdeadline_msr(vcpu, data);
break;
case MSR_IA32_TSC_ADJUST:
if (guest_cpuid_has_tsc_adjust(vcpu)) {
if (!msr_info->host_initiated) {
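/* Architecturally, a guest write to TSC_ADJUST moves the
* TSC by the delta between the new and the previous value,
* so mirror that delta in the TSC offset. */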
u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
}
vcpu->arch.ia32_tsc_adjust_msr = data;
}
break;
case MSR_IA32_MISC_ENABLE:
vcpu->arch.ia32_misc_enable_msr = data;
break;
case MSR_KVM_WALL_CLOCK_NEW:
case MSR_KVM_WALL_CLOCK:
vcpu->kvm->arch.wall_clock = data;
kvm_write_wall_clock(vcpu->kvm, data);
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
/* check whether the enable bit is set... */
if (!(data & 1))
break;
gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
&vcpu->arch.pv_time, data & ~1ULL))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
break;
}
case MSR_KVM_ASYNC_PF_EN:
if (kvm_pv_enable_async_pf(vcpu, data))
return 1;
break;
case MSR_KVM_STEAL_TIME:
if (unlikely(!sched_info_on()))
return 1;
if (data & KVM_STEAL_RESERVED_MASK)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
if (!(data & KVM_MSR_ENABLED))
break;
vcpu->arch.st.last_steal = current->sched_info.run_delay;
preempt_disable();
accumulate_steal_time(vcpu);
preempt_enable();
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
break;
case MSR_KVM_PV_EOI_EN:
if (kvm_lapic_enable_pv_eoi(vcpu, data))
return 1;
break;
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return set_msr_mce(vcpu, msr, data);
/* Performance counters are not protected by a CPUID bit,
* so we should check all of them in the generic path for the sake of
* cross vendor migration.
* Writing a zero into the event select MSRs disables them,
* which we perfectly emulate ;-). Any other value should be at least
* reported, some guests depend on them.
*/
case MSR_K7_EVNTSEL0:
case MSR_K7_EVNTSEL1:
case MSR_K7_EVNTSEL2:
case MSR_K7_EVNTSEL3:
if (data != 0)
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
/* at least RHEL 4 unconditionally writes to the perfctr registers,
* so we ignore writes to make it happy.
*/
case MSR_K7_PERFCTR0:
case MSR_K7_PERFCTR1:
case MSR_K7_PERFCTR2:
case MSR_K7_PERFCTR3:
vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
pr = true;
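/* fall through */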
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (pr || data != 0)
vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
"0x%x data 0x%llx\n", msr, data);
break;
case MSR_K7_CLK_CTL:
/*
* Ignore all writes to this no longer documented MSR.
* Writes are only relevant for old K7 processors,
* all pre-dating SVM, but a recommended workaround from
* AMD for these chips. It is possible to specify the
* affected processor models on the command line, hence
* the need to ignore the workaround.
*/
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = set_msr_hyperv_pw(vcpu, msr, data);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return set_msr_hyperv(vcpu, msr, data);
case MSR_IA32_BBL_CR_CTL3:
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n", msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.length = data;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
vcpu->arch.osvw.status = data;
break;
default:
if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
return xen_hvm_config(vcpu, data);
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_set_msr(vcpu, msr, data);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
msr, data);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data %llx\n",
msr, data);
break;
}
}
return 0;
}
EXPORT_SYMBOL_GPL(kvm_set_msr_common);
/*
* Reads an msr value (of 'msr_index') into 'pdata'.
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
return kvm_x86_ops->get_msr(vcpu, msr_index, pdata);
}
static int get_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!msr_mtrr_valid(msr))
return 1;
if (msr == MSR_MTRRdefType)
*pdata = vcpu->arch.mtrr_state.def_type +
(vcpu->arch.mtrr_state.enabled << 10);
else if (msr == MSR_MTRRfix64K_00000)
*pdata = p[0];
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
*pdata = p[1 + msr - MSR_MTRRfix16K_80000];
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
*pdata = p[3 + msr - MSR_MTRRfix4K_C0000];
else if (msr == MSR_IA32_CR_PAT)
*pdata = vcpu->arch.pat;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pdata = *pt;
}
return 0;
}
static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
switch (msr) {
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
data = 0;
break;
case MSR_IA32_MCG_CAP:
data = vcpu->arch.mcg_cap;
break;
case MSR_IA32_MCG_CTL:
if (!(mcg_cap & MCG_CTL_P))
return 1;
data = vcpu->arch.mcg_ctl;
break;
case MSR_IA32_MCG_STATUS:
data = vcpu->arch.mcg_status;
break;
default:
if (msr >= MSR_IA32_MC0_CTL &&
msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
u32 offset = msr - MSR_IA32_MC0_CTL;
data = vcpu->arch.mce_banks[offset];
break;
}
return 1;
}
*pdata = data;
return 0;
}
static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data = 0;
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
data = kvm->arch.hv_guest_os_id;
break;
case HV_X64_MSR_HYPERCALL:
data = kvm->arch.hv_hypercall;
break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
return 0;
}
static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data = 0;
switch (msr) {
case HV_X64_MSR_VP_INDEX: {
int r;
struct kvm_vcpu *v;
kvm_for_each_vcpu(r, v, vcpu->kvm)
if (v == vcpu)
data = r;
break;
}
case HV_X64_MSR_EOI:
return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
case HV_X64_MSR_ICR:
return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
case HV_X64_MSR_TPR:
return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
case HV_X64_MSR_APIC_ASSIST_PAGE:
data = vcpu->arch.hv_vapic;
break;
default:
vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
return 1;
}
*pdata = data;
return 0;
}
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
{
u64 data;
switch (msr) {
case MSR_IA32_PLATFORM_ID:
case MSR_IA32_EBL_CR_POWERON:
case MSR_IA32_DEBUGCTLMSR:
case MSR_IA32_LASTBRANCHFROMIP:
case MSR_IA32_LASTBRANCHTOIP:
case MSR_IA32_LASTINTFROMIP:
case MSR_IA32_LASTINTTOIP:
case MSR_K8_SYSCFG:
case MSR_K7_HWCR:
case MSR_VM_HSAVE_PA:
case MSR_K7_EVNTSEL0:
case MSR_K7_PERFCTR0:
case MSR_K8_INT_PENDING_MSG:
case MSR_AMD64_NB_CFG:
case MSR_FAM10H_MMIO_CONF_BASE:
case MSR_AMD64_BU_CFG2:
data = 0;
break;
case MSR_P6_PERFCTR0:
case MSR_P6_PERFCTR1:
case MSR_P6_EVNTSEL0:
case MSR_P6_EVNTSEL1:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
data = 0;
break;
case MSR_IA32_UCODE_REV:
data = 0x100000000ULL;
break;
case MSR_MTRRcap:
data = 0x500 | KVM_NR_VAR_MTRR;
break;
case 0x200 ... 0x2ff:
return get_msr_mtrr(vcpu, msr, pdata);
case 0xcd: /* fsb frequency */
data = 3;
break;
/*
* MSR_EBC_FREQUENCY_ID
* Conservative value valid for even the basic CPU models.
* Models 0,1: 000 in bits 23:21 indicating a bus speed of
* 100MHz, model 2 000 in bits 18:16 indicating 100MHz,
* and 266MHz for model 3, or 4. Set Core Clock
* Frequency to System Bus Frequency Ratio to 1 (bits
* 31:24) even though these are only valid for CPU
* models > 2, however guests may end up dividing or
* multiplying by zero otherwise.
*/
case MSR_EBC_FREQUENCY_ID:
data = 1 << 24;
break;
case MSR_IA32_APICBASE:
data = kvm_get_apic_base(vcpu);
break;
case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
return kvm_x2apic_msr_read(vcpu, msr, pdata);
case MSR_IA32_TSCDEADLINE:
data = kvm_get_lapic_tscdeadline_msr(vcpu);
break;
case MSR_IA32_TSC_ADJUST:
data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
break;
case MSR_IA32_MISC_ENABLE:
data = vcpu->arch.ia32_misc_enable_msr;
break;
case MSR_IA32_PERF_STATUS:
/* TSC increment by tick */
data = 1000ULL;
/* CPU multiplier */
data |= (((uint64_t)4ULL) << 40);
break;
case MSR_EFER:
data = vcpu->arch.efer;
break;
case MSR_KVM_WALL_CLOCK:
case MSR_KVM_WALL_CLOCK_NEW:
data = vcpu->kvm->arch.wall_clock;
break;
case MSR_KVM_SYSTEM_TIME:
case MSR_KVM_SYSTEM_TIME_NEW:
data = vcpu->arch.time;
break;
case MSR_KVM_ASYNC_PF_EN:
data = vcpu->arch.apf.msr_val;
break;
case MSR_KVM_STEAL_TIME:
data = vcpu->arch.st.msr_val;
break;
case MSR_KVM_PV_EOI_EN:
data = vcpu->arch.pv_eoi.msr_val;
break;
case MSR_IA32_P5_MC_ADDR:
case MSR_IA32_P5_MC_TYPE:
case MSR_IA32_MCG_CAP:
case MSR_IA32_MCG_CTL:
case MSR_IA32_MCG_STATUS:
case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
return get_msr_mce(vcpu, msr, pdata);
case MSR_K7_CLK_CTL:
/*
* Provide the expected ramp-up count for K7. All other
* fields are set to zero, indicating minimum divisors for
* every field.
*
* This prevents guest kernels on AMD host with CPU
* type 6, model 8 and higher from exploding due to
* the rdmsr failing.
*/
data = 0x20000000;
break;
case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
if (kvm_hv_msr_partition_wide(msr)) {
int r;
mutex_lock(&vcpu->kvm->lock);
r = get_msr_hyperv_pw(vcpu, msr, pdata);
mutex_unlock(&vcpu->kvm->lock);
return r;
} else
return get_msr_hyperv(vcpu, msr, pdata);
case MSR_IA32_BBL_CR_CTL3:
/* This legacy MSR exists but isn't fully documented in current
* silicon. It is however accessed by winxp in very narrow
* scenarios where it sets bit #19, itself documented as
* a "reserved" bit. Best effort attempt to source coherent
* read data here should the balance of the register be
* interpreted by the guest:
*
* L2 cache control register 3: 64GB range, 256KB size,
* enabled, latency 0x1, configured
*/
data = 0xbe702111;
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
data = vcpu->arch.osvw.length;
break;
case MSR_AMD64_OSVW_STATUS:
if (!guest_cpuid_has_osvw(vcpu))
return 1;
data = vcpu->arch.osvw.status;
break;
default:
if (kvm_pmu_msr(vcpu, msr))
return kvm_pmu_get_msr(vcpu, msr, pdata);
if (!ignore_msrs) {
vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
return 1;
} else {
vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr);
data = 0;
}
break;
}
*pdata = data;
return 0;
}
EXPORT_SYMBOL_GPL(kvm_get_msr_common);
/*
* Read or write a bunch of msrs. All parameters are kernel addresses.
*
* @return number of msrs processed successfully.
*/
static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
struct kvm_msr_entry *entries,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data))
{
int i, idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
for (i = 0; i < msrs->nmsrs; ++i)
if (do_msr(vcpu, entries[i].index, &entries[i].data))
break;
srcu_read_unlock(&vcpu->kvm->srcu, idx);
return i;
}
/*
* Read or write a bunch of msrs. Parameters are user addresses.
*
* @return number of msrs processed successfully.
*/
static int msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs __user *user_msrs,
int (*do_msr)(struct kvm_vcpu *vcpu,
unsigned index, u64 *data),
int writeback)
{
struct kvm_msrs msrs;
struct kvm_msr_entry *entries;
int r, n;
unsigned size;
r = -EFAULT;
if (copy_from_user(&msrs, user_msrs, sizeof msrs))
goto out;
r = -E2BIG;
if (msrs.nmsrs >= MAX_IO_MSRS)
goto out;
size = sizeof(struct kvm_msr_entry) * msrs.nmsrs;
entries = memdup_user(user_msrs->entries, size);
if (IS_ERR(entries)) {
r = PTR_ERR(entries);
goto out;
}
r = n = __msr_io(vcpu, &msrs, entries, do_msr);
if (r < 0)
goto out_free;
r = -EFAULT;
if (writeback && copy_to_user(user_msrs->entries, entries, size))
goto out_free;
r = n;
out_free:
kfree(entries);
out:
return r;
}
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
void __user *argp = (void __user *)arg;
long r;
switch (ioctl) {
case KVM_GET_MSR_INDEX_LIST: {
struct kvm_msr_list __user *user_msr_list = argp;
struct kvm_msr_list msr_list;
unsigned n;
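/* Userspace passes the capacity of its index array in nmsrs; we
* report the real count back and fail with -E2BIG if the supplied
* buffer cannot hold all saved and emulated MSRs. */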
r = -EFAULT;
if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
goto out;
n = msr_list.nmsrs;
msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
goto out;
r = -E2BIG;
if (n < msr_list.nmsrs)
goto out;
r = -EFAULT;
if (copy_to_user(user_msr_list->indices, &msrs_to_save,
num_msrs_to_save * sizeof(u32)))
goto out;
if (copy_to_user(user_msr_list->indices + num_msrs_to_save,
&emulated_msrs,
ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
goto out;
r = 0;
break;
}
case KVM_GET_SUPPORTED_CPUID: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_dev_ioctl_get_supported_cpuid(&cpuid,
cpuid_arg->entries);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
goto out;
r = 0;
break;
}
case KVM_X86_GET_MCE_CAP_SUPPORTED: {
u64 mce_cap;
mce_cap = KVM_MCE_CAP_SUPPORTED;
r = -EFAULT;
if (copy_to_user(argp, &mce_cap, sizeof mce_cap))
goto out;
r = 0;
break;
}
default:
r = -EINVAL;
}
out:
return r;
}
static void wbinvd_ipi(void *garbage)
{
wbinvd();
}
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.iommu_domain &&
!(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Handle the case where WBINVD may be executed by the guest */
if (need_emulate_wbinvd(vcpu)) {
if (kvm_x86_ops->has_wbinvd_exit())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
kvm_x86_ops->vcpu_load(vcpu, cpu);
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
vcpu->arch.tsc_offset_adjustment = 0;
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
}
if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
*/
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
accumulate_steal_time(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
kvm_x86_ops->vcpu_put(vcpu);
kvm_put_guest_fpu(vcpu);
vcpu->arch.last_host_tsc = native_read_tsc();
}
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
return 0;
}
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
struct kvm_lapic_state *s)
{
kvm_apic_post_state_restore(vcpu, s);
update_cr8_intercept(vcpu);
return 0;
}
static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
kvm_queue_interrupt(vcpu, irq->irq, false);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
static int kvm_vcpu_ioctl_nmi(struct kvm_vcpu *vcpu)
{
kvm_inject_nmi(vcpu);
return 0;
}
static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac)
{
if (tac->flags)
return -EINVAL;
vcpu->arch.tpr_access_reporting = !!tac->enabled;
return 0;
}
static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu,
u64 mcg_cap)
{
int r;
unsigned bank_num = mcg_cap & 0xff, bank;
r = -EINVAL;
if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS)
goto out;
if (mcg_cap & ~(KVM_MCE_CAP_SUPPORTED | 0xff | 0xff0000))
goto out;
r = 0;
vcpu->arch.mcg_cap = mcg_cap;
/* Init IA32_MCG_CTL to all 1s */
if (mcg_cap & MCG_CTL_P)
vcpu->arch.mcg_ctl = ~(u64)0;
/* Init IA32_MCi_CTL to all 1s */
for (bank = 0; bank < bank_num; bank++)
vcpu->arch.mce_banks[bank*4] = ~(u64)0;
out:
return r;
}
static int kvm_vcpu_ioctl_x86_set_mce(struct kvm_vcpu *vcpu,
struct kvm_x86_mce *mce)
{
u64 mcg_cap = vcpu->arch.mcg_cap;
unsigned bank_num = mcg_cap & 0xff;
u64 *banks = vcpu->arch.mce_banks;
if (mce->bank >= bank_num || !(mce->status & MCI_STATUS_VAL))
return -EINVAL;
/*
* if IA32_MCG_CTL is not all 1s, the uncorrected error
* reporting is disabled
*/
if ((mce->status & MCI_STATUS_UC) && (mcg_cap & MCG_CTL_P) &&
vcpu->arch.mcg_ctl != ~(u64)0)
return 0;
banks += 4 * mce->bank;
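/* Each bank occupies four u64s: CTL, STATUS, ADDR and MISC. */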
/*
* if IA32_MCi_CTL is not all 1s, the uncorrected error
* reporting is disabled for the bank
*/
if ((mce->status & MCI_STATUS_UC) && banks[0] != ~(u64)0)
return 0;
if (mce->status & MCI_STATUS_UC) {
if ((vcpu->arch.mcg_status & MCG_STATUS_MCIP) ||
!kvm_read_cr4_bits(vcpu, X86_CR4_MCE)) {
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 0;
}
if (banks[1] & MCI_STATUS_VAL)
mce->status |= MCI_STATUS_OVER;
banks[2] = mce->addr;
banks[3] = mce->misc;
vcpu->arch.mcg_status = mce->mcg_status;
banks[1] = mce->status;
kvm_queue_exception(vcpu, MC_VECTOR);
} else if (!(banks[1] & MCI_STATUS_VAL)
|| !(banks[1] & MCI_STATUS_UC)) {
if (banks[1] & MCI_STATUS_VAL)
mce->status |= MCI_STATUS_OVER;
banks[2] = mce->addr;
banks[3] = mce->misc;
banks[1] = mce->status;
} else
banks[1] |= MCI_STATUS_OVER;
return 0;
}
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
process_nmi(vcpu);
events->exception.injected =
vcpu->arch.exception.pending &&
!kvm_exception_is_soft(vcpu->arch.exception.nr);
events->exception.nr = vcpu->arch.exception.nr;
events->exception.has_error_code = vcpu->arch.exception.has_error_code;
events->exception.pad = 0;
events->exception.error_code = vcpu->arch.exception.error_code;
events->interrupt.injected =
vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
events->interrupt.nr = vcpu->arch.interrupt.nr;
events->interrupt.soft = 0;
events->interrupt.shadow =
kvm_x86_ops->get_interrupt_shadow(vcpu,
KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
events->nmi.injected = vcpu->arch.nmi_injected;
events->nmi.pending = vcpu->arch.nmi_pending != 0;
events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
events->nmi.pad = 0;
events->sipi_vector = vcpu->arch.sipi_vector;
events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW);
memset(&events->reserved, 0, sizeof(events->reserved));
}
static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
struct kvm_vcpu_events *events)
{
if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
| KVM_VCPUEVENT_VALID_SIPI_VECTOR
| KVM_VCPUEVENT_VALID_SHADOW))
return -EINVAL;
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
vcpu->arch.exception.has_error_code = events->exception.has_error_code;
vcpu->arch.exception.error_code = events->exception.error_code;
vcpu->arch.interrupt.pending = events->interrupt.injected;
vcpu->arch.interrupt.nr = events->interrupt.nr;
vcpu->arch.interrupt.soft = events->interrupt.soft;
if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
kvm_x86_ops->set_interrupt_shadow(vcpu,
events->interrupt.shadow);
vcpu->arch.nmi_injected = events->nmi.injected;
if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
vcpu->arch.nmi_pending = events->nmi.pending;
kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
vcpu->arch.sipi_vector = events->sipi_vector;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
dbgregs->dr6 = vcpu->arch.dr6;
dbgregs->dr7 = vcpu->arch.dr7;
dbgregs->flags = 0;
memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
}
static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
struct kvm_debugregs *dbgregs)
{
if (dbgregs->flags)
return -EINVAL;
memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = dbgregs->dr6;
vcpu->arch.dr7 = dbgregs->dr7;
return 0;
}
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
if (cpu_has_xsave)
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->xsave,
xstate_size);
else {
memcpy(guest_xsave->region,
&vcpu->arch.guest_fpu.state->fxsave,
sizeof(struct i387_fxsave_struct));
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XSTATE_FPSSE;
}
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
if (cpu_has_xsave)
memcpy(&vcpu->arch.guest_fpu.state->xsave,
guest_xsave->region, xstate_size);
else {
if (xstate_bv & ~XSTATE_FPSSE)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state->fxsave,
guest_xsave->region, sizeof(struct i387_fxsave_struct));
}
return 0;
}
static void kvm_vcpu_ioctl_x86_get_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{
if (!cpu_has_xsave) {
guest_xcrs->nr_xcrs = 0;
return;
}
guest_xcrs->nr_xcrs = 1;
guest_xcrs->flags = 0;
guest_xcrs->xcrs[0].xcr = XCR_XFEATURE_ENABLED_MASK;
guest_xcrs->xcrs[0].value = vcpu->arch.xcr0;
}
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
struct kvm_xcrs *guest_xcrs)
{
int i, r = 0;
if (!cpu_has_xsave)
return -EINVAL;
if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags)
return -EINVAL;
for (i = 0; i < guest_xcrs->nr_xcrs; i++)
/* Only support XCR0 currently */
if (guest_xcrs->xcrs[0].xcr == XCR_XFEATURE_ENABLED_MASK) {
r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK,
guest_xcrs->xcrs[0].value);
break;
}
if (r)
r = -EINVAL;
return r;
}
/*
* kvm_set_guest_paused() indicates to the guest kernel that it has been
* stopped by the hypervisor. This function will be called from the host only.
* EINVAL is returned when the host attempts to set the flag for a guest that
* does not support pv clocks.
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
return 0;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
int r;
union {
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
struct kvm_xcrs *xcrs;
void *buffer;
} u;
u.buffer = NULL;
switch (ioctl) {
case KVM_GET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
r = -ENOMEM;
if (!u.lapic)
goto out;
r = kvm_vcpu_ioctl_get_lapic(vcpu, u.lapic);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, u.lapic, sizeof(struct kvm_lapic_state)))
goto out;
r = 0;
break;
}
case KVM_SET_LAPIC: {
r = -EINVAL;
if (!vcpu->arch.apic)
goto out;
u.lapic = memdup_user(argp, sizeof(*u.lapic));
if (IS_ERR(u.lapic))
return PTR_ERR(u.lapic);
r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
break;
}
case KVM_INTERRUPT: {
struct kvm_interrupt irq;
r = -EFAULT;
if (copy_from_user(&irq, argp, sizeof irq))
goto out;
r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
break;
}
case KVM_NMI: {
r = kvm_vcpu_ioctl_nmi(vcpu);
break;
}
case KVM_SET_CPUID: {
struct kvm_cpuid __user *cpuid_arg = argp;
struct kvm_cpuid cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid(vcpu, &cpuid, cpuid_arg->entries);
break;
}
case KVM_SET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_set_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
break;
}
case KVM_GET_CPUID2: {
struct kvm_cpuid2 __user *cpuid_arg = argp;
struct kvm_cpuid2 cpuid;
r = -EFAULT;
if (copy_from_user(&cpuid, cpuid_arg, sizeof cpuid))
goto out;
r = kvm_vcpu_ioctl_get_cpuid2(vcpu, &cpuid,
cpuid_arg->entries);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(cpuid_arg, &cpuid, sizeof cpuid))
goto out;
r = 0;
break;
}
case KVM_GET_MSRS:
r = msr_io(vcpu, argp, kvm_get_msr, 1);
break;
case KVM_SET_MSRS:
r = msr_io(vcpu, argp, do_set_msr, 0);
break;
case KVM_TPR_ACCESS_REPORTING: {
struct kvm_tpr_access_ctl tac;
r = -EFAULT;
if (copy_from_user(&tac, argp, sizeof tac))
goto out;
r = vcpu_ioctl_tpr_access_reporting(vcpu, &tac);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &tac, sizeof tac))
goto out;
r = 0;
break;
}
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
r = -EINVAL;
if (!irqchip_in_kernel(vcpu->kvm))
goto out;
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
r = 0;
kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
u64 mcg_cap;
r = -EFAULT;
if (copy_from_user(&mcg_cap, argp, sizeof mcg_cap))
goto out;
r = kvm_vcpu_ioctl_x86_setup_mce(vcpu, mcg_cap);
break;
}
case KVM_X86_SET_MCE: {
struct kvm_x86_mce mce;
r = -EFAULT;
if (copy_from_user(&mce, argp, sizeof mce))
goto out;
r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce);
break;
}
case KVM_GET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
kvm_vcpu_ioctl_x86_get_vcpu_events(vcpu, &events);
r = -EFAULT;
if (copy_to_user(argp, &events, sizeof(struct kvm_vcpu_events)))
break;
r = 0;
break;
}
case KVM_SET_VCPU_EVENTS: {
struct kvm_vcpu_events events;
r = -EFAULT;
if (copy_from_user(&events, argp, sizeof(struct kvm_vcpu_events)))
break;
r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
break;
}
case KVM_GET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
r = -EFAULT;
if (copy_to_user(argp, &dbgregs,
sizeof(struct kvm_debugregs)))
break;
r = 0;
break;
}
case KVM_SET_DEBUGREGS: {
struct kvm_debugregs dbgregs;
r = -EFAULT;
if (copy_from_user(&dbgregs, argp,
sizeof(struct kvm_debugregs)))
break;
r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
break;
}
case KVM_GET_XSAVE: {
u.xsave = kzalloc(sizeof(struct kvm_xsave), GFP_KERNEL);
r = -ENOMEM;
if (!u.xsave)
break;
kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
r = -EFAULT;
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
break;
r = 0;
break;
}
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
if (IS_ERR(u.xsave))
return PTR_ERR(u.xsave);
r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
break;
}
case KVM_GET_XCRS: {
u.xcrs = kzalloc(sizeof(struct kvm_xcrs), GFP_KERNEL);
r = -ENOMEM;
if (!u.xcrs)
break;
kvm_vcpu_ioctl_x86_get_xcrs(vcpu, u.xcrs);
r = -EFAULT;
if (copy_to_user(argp, u.xcrs,
sizeof(struct kvm_xcrs)))
break;
r = 0;
break;
}
case KVM_SET_XCRS: {
u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
if (IS_ERR(u.xcrs))
return PTR_ERR(u.xcrs);
r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
break;
}
case KVM_SET_TSC_KHZ: {
u32 user_tsc_khz;
r = -EINVAL;
user_tsc_khz = (u32)arg;
if (user_tsc_khz >= kvm_max_guest_tsc_khz)
goto out;
if (user_tsc_khz == 0)
user_tsc_khz = tsc_khz;
kvm_set_tsc_khz(vcpu, user_tsc_khz);
r = 0;
goto out;
}
case KVM_GET_TSC_KHZ: {
r = vcpu->arch.virtual_tsc_khz;
goto out;
}
case KVM_KVMCLOCK_CTRL: {
r = kvm_set_guest_paused(vcpu);
goto out;
}
default:
r = -EINVAL;
}
out:
kfree(u.buffer);
return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
}
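/*
 * The three-page check below mirrors the size of the TSS region KVM
 * needs for real-mode emulation: the address must leave room for
 * 3 * PAGE_SIZE below the top of the address space so the region
 * cannot wrap.
 */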
static int kvm_vm_ioctl_set_tss_addr(struct kvm *kvm, unsigned long addr)
{
int ret;
if (addr > (unsigned int)(-3 * PAGE_SIZE))
return -EINVAL;
ret = kvm_x86_ops->set_tss_addr(kvm, addr);
return ret;
}
static int kvm_vm_ioctl_set_identity_map_addr(struct kvm *kvm,
u64 ident_addr)
{
kvm->arch.ept_identity_map_addr = ident_addr;
return 0;
}
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
u32 kvm_nr_mmu_pages)
{
if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES)
return -EINVAL;
mutex_lock(&kvm->slots_lock);
kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
mutex_unlock(&kvm->slots_lock);
return 0;
}
static int kvm_vm_ioctl_get_nr_mmu_pages(struct kvm *kvm)
{
return kvm->arch.n_max_mmu_pages;
}
static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
memcpy(&chip->chip.pic,
&pic_irqchip(kvm)->pics[0],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_PIC_SLAVE:
memcpy(&chip->chip.pic,
&pic_irqchip(kvm)->pics[1],
sizeof(struct kvm_pic_state));
break;
case KVM_IRQCHIP_IOAPIC:
r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
break;
}
return r;
}
static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
{
int r;
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_PIC_MASTER:
spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[0],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_PIC_SLAVE:
spin_lock(&pic_irqchip(kvm)->lock);
memcpy(&pic_irqchip(kvm)->pics[1],
&chip->chip.pic,
sizeof(struct kvm_pic_state));
spin_unlock(&pic_irqchip(kvm)->lock);
break;
case KVM_IRQCHIP_IOAPIC:
r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
break;
}
kvm_pic_update_irq(pic_irqchip(kvm));
return r;
}
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
kvm_pit_load_count(kvm, 0, ps->channels[0].count, 0);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_get_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
memcpy(ps->channels, &kvm->arch.vpit->pit_state.channels,
sizeof(ps->channels));
ps->flags = kvm->arch.vpit->pit_state.flags;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
memset(&ps->reserved, 0, sizeof(ps->reserved));
return r;
}
static int kvm_vm_ioctl_set_pit2(struct kvm *kvm, struct kvm_pit_state2 *ps)
{
int r = 0, start = 0;
u32 prev_legacy, cur_legacy;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
if (!prev_legacy && cur_legacy)
start = 1;
memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
sizeof(kvm->arch.vpit->pit_state.channels));
kvm->arch.vpit->pit_state.flags = ps->flags;
kvm_pit_load_count(kvm, 0, kvm->arch.vpit->pit_state.channels[0].count, start);
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return r;
}
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
struct kvm_reinject_control *control)
{
if (!kvm->arch.vpit)
return -ENXIO;
mutex_lock(&kvm->arch.vpit->pit_state.lock);
kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
mutex_unlock(&kvm->arch.vpit->pit_state.lock);
return 0;
}
/**
* kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
* @kvm: kvm instance
* @log: slot id and address to which we copy the log
*
 * We need to keep in mind that VCPU threads can write to the bitmap
 * concurrently.  So, to avoid losing data, we keep the following order for
* each bit:
*
* 1. Take a snapshot of the bit and clear it if needed.
* 2. Write protect the corresponding page.
* 3. Flush TLB's if needed.
* 4. Copy the snapshot to the userspace.
*
* Between 2 and 3, the guest may write to the page using the remaining TLB
* entry. This is not a problem because the page will be reported dirty at
 * step 4 using the snapshot taken before, and step 3 ensures that successive
* writes will be logged for the next call.
*/
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
int r;
struct kvm_memory_slot *memslot;
unsigned long n, i;
unsigned long *dirty_bitmap;
unsigned long *dirty_bitmap_buffer;
bool is_dirty = false;
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
dirty_bitmap = memslot->dirty_bitmap;
r = -ENOENT;
if (!dirty_bitmap)
goto out;
n = kvm_dirty_bitmap_bytes(memslot);
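	/*
	 * The buffer used below is the second half of the dirty bitmap,
	 * which is allocated double-sized for this purpose; the snapshot
	 * can therefore be built without a separate allocation here.
	 */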
dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
memset(dirty_bitmap_buffer, 0, n);
spin_lock(&kvm->mmu_lock);
for (i = 0; i < n / sizeof(long); i++) {
unsigned long mask;
gfn_t offset;
if (!dirty_bitmap[i])
continue;
is_dirty = true;
mask = xchg(&dirty_bitmap[i], 0);
dirty_bitmap_buffer[i] = mask;
offset = i * BITS_PER_LONG;
kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
}
if (is_dirty)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
r = -EFAULT;
if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
goto out;
r = 0;
out:
mutex_unlock(&kvm->slots_lock);
return r;
}
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event->irq, irq_event->level);
return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
/*
* This union makes it completely explicit to gcc-3.x
	 * that these variables' stack usage should be
* combined, not added together.
*/
union {
struct kvm_pit_state ps;
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
r = -EFAULT;
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_CREATE_IRQCHIP: {
struct kvm_pic *vpic;
mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -EINVAL;
if (atomic_read(&kvm->online_vcpus))
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_master);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_slave);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_eclr);
mutex_unlock(&kvm->slots_lock);
kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
smp_wmb();
kvm->arch.vpic = vpic;
smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
mutex_unlock(&kvm->slots_lock);
}
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
case KVM_CREATE_PIT:
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
goto create_pit;
case KVM_CREATE_PIT2:
r = -EFAULT;
if (copy_from_user(&u.pit_config, argp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
mutex_unlock(&kvm->slots_lock);
break;
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
goto get_irqchip_out;
r = -EFAULT;
if (copy_to_user(argp, chip, sizeof *chip))
goto get_irqchip_out;
r = 0;
get_irqchip_out:
kfree(chip);
break;
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
goto set_irqchip_out;
r = 0;
set_irqchip_out:
kfree(chip);
break;
}
case KVM_GET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
break;
}
case KVM_GET_PIT2: {
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT2: {
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
break;
}
case KVM_REINJECT_CONTROL: {
struct kvm_reinject_control control;
r = -EFAULT;
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
sizeof(struct kvm_xen_hvm_config)))
goto out;
r = -EINVAL;
if (kvm->arch.xen_hvm_config.flags)
goto out;
r = 0;
break;
}
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
goto out;
r = -EINVAL;
if (user_ns.flags)
goto out;
r = 0;
local_irq_disable();
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
local_irq_disable();
now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
goto out;
r = 0;
break;
}
default:
;
}
out:
return r;
}
static void kvm_init_msr_list(void)
{
u32 dummy[2];
unsigned i, j;
	/* Skip the KVM-specific MSRs at the head of the list; probe the rest. */
for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
continue;
if (j < i)
msrs_to_save[j] = msrs_to_save[i];
j++;
}
num_msrs_to_save = j;
}
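/*
 * MMIO is dispatched in chunks of at most eight bytes: each chunk is
 * offered first to the in-kernel local APIC and then to the MMIO bus.
 * The return value is the number of bytes handled in the kernel, so a
 * short count tells the caller the remainder must go to userspace.
 */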
static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,
const void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_write(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->set_segment(vcpu, var, seg);
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->get_segment(vcpu, var, seg);
}
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
gpa_t t_gpa;
struct x86_exception exception;
BUG_ON(!mmu_is_nested(vcpu));
/* NPT walks are always user-walks */
access |= PFERR_USER_MASK;
t_gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gpa, access, &exception);
return t_gpa;
}
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_FETCH_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
access |= PFERR_WRITE_MASK;
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
}
/* Used to access any guest's mapped memory without checking CPL. */
gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception)
{
return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, exception);
}
static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u32 access,
struct x86_exception *exception)
{
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
if (ret < 0) {
r = X86EMUL_IO_NEEDED;
goto out;
}
bytes -= toread;
data += toread;
addr += toread;
}
out:
return r;
}
/* used for instruction fetching */
static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
access | PFERR_FETCH_MASK,
exception);
}
int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_virt);
static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val, unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
}
int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
gva_t addr, void *val,
unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
void *data = val;
int r = X86EMUL_CONTINUE;
while (bytes) {
gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
PFERR_WRITE_MASK,
exception);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
if (gpa == UNMAPPED_GVA)
return X86EMUL_PROPAGATE_FAULT;
ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
if (ret < 0) {
r = X86EMUL_IO_NEEDED;
goto out;
}
bytes -= towrite;
data += towrite;
addr += towrite;
}
out:
return r;
}
EXPORT_SYMBOL_GPL(kvm_write_guest_virt_system);
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
gpa_t *gpa, struct x86_exception *exception,
bool write)
{
u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0)
| (write ? PFERR_WRITE_MASK : 0);
if (vcpu_match_mmio_gva(vcpu, gva)
&& !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) {
*gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT |
(gva & (PAGE_SIZE - 1));
trace_vcpu_match_mmio(gva, *gpa, write, false);
return 1;
}
*gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception);
if (*gpa == UNMAPPED_GVA)
return -1;
/* For APIC access vmexit */
if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
return 1;
if (vcpu_match_mmio_gpa(vcpu, *gpa)) {
trace_vcpu_match_mmio(gva, *gpa, write, true);
return 1;
}
return 0;
}
int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes)
{
int ret;
ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
if (ret < 0)
return 0;
kvm_mmu_pte_write(vcpu, gpa, val, bytes);
return 1;
}
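/*
 * Reads and writes share emulator_read_write(); this ops table captures
 * the four points where they differ: the prepare step, direct emulation
 * against guest memory, in-kernel MMIO dispatch, and the
 * exit-to-userspace MMIO path.
 */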
struct read_write_emulator_ops {
int (*read_write_prepare)(struct kvm_vcpu *vcpu, void *val,
int bytes);
int (*read_write_emulate)(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes);
int (*read_write_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
int bytes, void *val);
int (*read_write_exit_mmio)(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes);
bool write;
};
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
vcpu->mmio_fragments[0].gpa, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
return 0;
}
static int read_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return !kvm_read_guest(vcpu->kvm, gpa, val, bytes);
}
static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return emulator_write_phys(vcpu, gpa, val, bytes);
}
static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
return vcpu_mmio_write(vcpu, gpa, bytes, val);
}
static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, bytes, gpa, 0);
return X86EMUL_IO_NEEDED;
}
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
return X86EMUL_CONTINUE;
}
static const struct read_write_emulator_ops read_emultor = {
.read_write_prepare = read_prepare,
.read_write_emulate = read_emulate,
.read_write_mmio = vcpu_mmio_read,
.read_write_exit_mmio = read_exit_mmio,
};
static const struct read_write_emulator_ops write_emultor = {
.read_write_emulate = write_emulate,
.read_write_mmio = write_mmio,
.read_write_exit_mmio = write_exit_mmio,
.write = true,
};
static int emulator_read_write_onepage(unsigned long addr, void *val,
unsigned int bytes,
struct x86_exception *exception,
struct kvm_vcpu *vcpu,
const struct read_write_emulator_ops *ops)
{
gpa_t gpa;
int handled, ret;
bool write = ops->write;
struct kvm_mmio_fragment *frag;
ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
if (ret < 0)
return X86EMUL_PROPAGATE_FAULT;
/* For APIC access vmexit */
if (ret)
goto mmio;
if (ops->read_write_emulate(vcpu, gpa, val, bytes))
return X86EMUL_CONTINUE;
mmio:
/*
* Is this MMIO handled locally?
*/
handled = ops->read_write_mmio(vcpu, gpa, bytes, val);
if (handled == bytes)
return X86EMUL_CONTINUE;
gpa += handled;
bytes -= handled;
val += handled;
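	/*
	 * Whatever the kernel could not handle becomes an MMIO fragment;
	 * the caller turns the first fragment into a KVM_EXIT_MMIO exit
	 * so userspace can complete the access.
	 */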
WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
frag->gpa = gpa;
frag->data = val;
frag->len = bytes;
return X86EMUL_CONTINUE;
}
int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
void *val, unsigned int bytes,
struct x86_exception *exception,
const struct read_write_emulator_ops *ops)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
int rc;
if (ops->read_write_prepare &&
ops->read_write_prepare(vcpu, val, bytes))
return X86EMUL_CONTINUE;
vcpu->mmio_nr_fragments = 0;
/* Crossing a page boundary? */
if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
int now;
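		/*
		 * -addr & ~PAGE_MASK is the number of bytes up to the next
		 * page boundary.  For example, with 4 KiB pages and
		 * addr == 0x1ffe, now == 2, so a 4-byte access is split
		 * into 2 bytes on this page and 2 bytes on the next.
		 */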
now = -addr & ~PAGE_MASK;
rc = emulator_read_write_onepage(addr, val, now, exception,
vcpu, ops);
if (rc != X86EMUL_CONTINUE)
return rc;
addr += now;
val += now;
bytes -= now;
}
rc = emulator_read_write_onepage(addr, val, bytes, exception,
vcpu, ops);
if (rc != X86EMUL_CONTINUE)
return rc;
if (!vcpu->mmio_nr_fragments)
return rc;
gpa = vcpu->mmio_fragments[0].gpa;
vcpu->mmio_needed = 1;
vcpu->mmio_cur_fragment = 0;
vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
vcpu->run->exit_reason = KVM_EXIT_MMIO;
vcpu->run->mmio.phys_addr = gpa;
return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
}
static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
void *val,
unsigned int bytes,
struct x86_exception *exception)
{
return emulator_read_write(ctxt, addr, val, bytes,
exception, &read_emultor);
}
int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *val,
unsigned int bytes,
struct x86_exception *exception)
{
return emulator_read_write(ctxt, addr, (void *)val, bytes,
exception, &write_emultor);
}
#define CMPXCHG_TYPE(t, ptr, old, new) \
(cmpxchg((t *)(ptr), *(t *)(old), *(t *)(new)) == *(t *)(old))
#ifdef CONFIG_X86_64
# define CMPXCHG64(ptr, old, new) CMPXCHG_TYPE(u64, ptr, old, new)
#else
# define CMPXCHG64(ptr, old, new) \
(cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
#endif
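/*
 * Both macros evaluate to true iff the exchange succeeded: cmpxchg()
 * returns the value previously in memory, which equals *old exactly
 * when the swap took place.
 */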
static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr,
const void *old,
const void *new,
unsigned int bytes,
struct x86_exception *exception)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
gpa_t gpa;
struct page *page;
char *kaddr;
bool exchanged;
	/* A guest's cmpxchg8b has to be emulated atomically. */
if (bytes > 8 || (bytes & (bytes - 1)))
goto emul_write;
gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
if (gpa == UNMAPPED_GVA ||
(gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
goto emul_write;
if (((gpa + bytes - 1) & PAGE_MASK) != (gpa & PAGE_MASK))
goto emul_write;
page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto emul_write;
kaddr = kmap_atomic(page);
kaddr += offset_in_page(gpa);
switch (bytes) {
case 1:
exchanged = CMPXCHG_TYPE(u8, kaddr, old, new);
break;
case 2:
exchanged = CMPXCHG_TYPE(u16, kaddr, old, new);
break;
case 4:
exchanged = CMPXCHG_TYPE(u32, kaddr, old, new);
break;
case 8:
exchanged = CMPXCHG64(kaddr, old, new);
break;
default:
BUG();
}
kunmap_atomic(kaddr);
kvm_release_page_dirty(page);
if (!exchanged)
return X86EMUL_CMPXCHG_FAILED;
kvm_mmu_pte_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
emul_write:
printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
return emulator_write_emulated(ctxt, addr, new, bytes, exception);
}
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
	/* TODO: String I/O for in-kernel device */
int r;
if (vcpu->arch.pio.in)
r = kvm_io_bus_read(vcpu->kvm, KVM_PIO_BUS, vcpu->arch.pio.port,
vcpu->arch.pio.size, pd);
else
r = kvm_io_bus_write(vcpu->kvm, KVM_PIO_BUS,
vcpu->arch.pio.port, vcpu->arch.pio.size,
pd);
return r;
}
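/*
 * Returns 1 if the I/O was fully handled by an in-kernel device; returns
 * 0 after filling in vcpu->run, in which case the vcpu must exit to
 * userspace with KVM_EXIT_IO to complete the access.
 */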
static int emulator_pio_in_out(struct kvm_vcpu *vcpu, int size,
unsigned short port, void *val,
unsigned int count, bool in)
{
trace_kvm_pio(!in, port, size, count);
vcpu->arch.pio.port = port;
vcpu->arch.pio.in = in;
vcpu->arch.pio.count = count;
vcpu->arch.pio.size = size;
if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
vcpu->arch.pio.count = 0;
return 1;
}
vcpu->run->exit_reason = KVM_EXIT_IO;
vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
vcpu->run->io.size = size;
vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
vcpu->run->io.count = count;
vcpu->run->io.port = port;
return 0;
}
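/*
 * A PIO-in that reaches userspace completes in two calls: the first one
 * fills the run structure and returns 0; once userspace has done the
 * I/O, the pending pio.count makes the next call jump straight to
 * copying the data out of pio_data.
 */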
static int emulator_pio_in_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port, void *val,
unsigned int count)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int ret;
if (vcpu->arch.pio.count)
goto data_avail;
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
memcpy(val, vcpu->arch.pio_data, size * count);
vcpu->arch.pio.count = 0;
return 1;
}
return 0;
}
static int emulator_pio_out_emulated(struct x86_emulate_ctxt *ctxt,
int size, unsigned short port,
const void *val, unsigned int count)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
memcpy(vcpu->arch.pio_data, val, size * count);
return emulator_pio_in_out(vcpu, size, port, (void *)val, count, false);
}
static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
return kvm_x86_ops->get_segment_base(vcpu, seg);
}
static void emulator_invlpg(struct x86_emulate_ctxt *ctxt, ulong address)
{
kvm_mmu_invlpg(emul_to_vcpu(ctxt), address);
}
int kvm_emulate_wbinvd(struct kvm_vcpu *vcpu)
{
if (!need_emulate_wbinvd(vcpu))
return X86EMUL_CONTINUE;
if (kvm_x86_ops->has_wbinvd_exit()) {
int cpu = get_cpu();
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
wbinvd_ipi, NULL, 1);
put_cpu();
cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
} else
wbinvd();
return X86EMUL_CONTINUE;
}
EXPORT_SYMBOL_GPL(kvm_emulate_wbinvd);
static void emulator_wbinvd(struct x86_emulate_ctxt *ctxt)
{
kvm_emulate_wbinvd(emul_to_vcpu(ctxt));
}
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
{
return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
}
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
{
return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
}
static u64 mk_cr_64(u64 curr_cr, u32 new_val)
{
return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
}
static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long value;
switch (cr) {
case 0:
value = kvm_read_cr0(vcpu);
break;
case 2:
value = vcpu->arch.cr2;
break;
case 3:
value = kvm_read_cr3(vcpu);
break;
case 4:
value = kvm_read_cr4(vcpu);
break;
case 8:
value = kvm_get_cr8(vcpu);
break;
default:
kvm_err("%s: unexpected cr %u\n", __func__, cr);
return 0;
}
return value;
}
static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
int res = 0;
switch (cr) {
case 0:
res = kvm_set_cr0(vcpu, mk_cr_64(kvm_read_cr0(vcpu), val));
break;
case 2:
vcpu->arch.cr2 = val;
break;
case 3:
res = kvm_set_cr3(vcpu, val);
break;
case 4:
res = kvm_set_cr4(vcpu, mk_cr_64(kvm_read_cr4(vcpu), val));
break;
case 8:
res = kvm_set_cr8(vcpu, val);
break;
default:
kvm_err("%s: unexpected cr %u\n", __func__, cr);
res = -1;
}
return res;
}
static void emulator_set_rflags(struct x86_emulate_ctxt *ctxt, ulong val)
{
kvm_set_rflags(emul_to_vcpu(ctxt), val);
}
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
}
static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->set_gdt(emul_to_vcpu(ctxt), dt);
}
static void emulator_set_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
{
kvm_x86_ops->set_idt(emul_to_vcpu(ctxt), dt);
}
static unsigned long emulator_get_cached_segment_base(
struct x86_emulate_ctxt *ctxt, int seg)
{
return get_segment_base(emul_to_vcpu(ctxt), seg);
}
static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
struct desc_struct *desc, u32 *base3,
int seg)
{
struct kvm_segment var;
kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
*selector = var.selector;
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
return false;
}
if (var.g)
var.limit >>= 12;
set_desc_limit(desc, var.limit);
set_desc_base(desc, (unsigned long)var.base);
#ifdef CONFIG_X86_64
if (base3)
*base3 = var.base >> 32;
#endif
desc->type = var.type;
desc->s = var.s;
desc->dpl = var.dpl;
desc->p = var.present;
desc->avl = var.avl;
desc->l = var.l;
desc->d = var.db;
desc->g = var.g;
return true;
}
static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
struct desc_struct *desc, u32 base3,
int seg)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
struct kvm_segment var;
var.selector = selector;
var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64
var.base |= ((u64)base3) << 32;
#endif
var.limit = get_desc_limit(desc);
if (desc->g)
var.limit = (var.limit << 12) | 0xfff;
var.type = desc->type;
var.present = desc->p;
var.dpl = desc->dpl;
var.db = desc->d;
var.s = desc->s;
var.l = desc->l;
var.g = desc->g;
var.avl = desc->avl;
var.present = desc->p;
var.unusable = !var.present;
var.padding = 0;
kvm_set_segment(vcpu, &var, seg);
return;
}
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata)
{
return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
}
static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 data)
{
struct msr_data msr;
msr.data = data;
msr.index = msr_index;
msr.host_initiated = false;
return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
}
static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
u32 pmc, u64 *pdata)
{
return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
}
static void emulator_halt(struct x86_emulate_ctxt *ctxt)
{
emul_to_vcpu(ctxt)->arch.halt_request = 1;
}
static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_disable();
kvm_load_guest_fpu(emul_to_vcpu(ctxt));
/*
* CR0.TS may reference the host fpu state, not the guest fpu state,
* so it may be clear at this point.
*/
clts();
}
static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
{
preempt_enable();
}
static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
}
static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
{
kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
}
static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
{
return kvm_register_read(emul_to_vcpu(ctxt), reg);
}
static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
{
kvm_register_write(emul_to_vcpu(ctxt), reg, val);
}
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
.read_std = kvm_read_guest_virt_system,
.write_std = kvm_write_guest_virt_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
.invlpg = emulator_invlpg,
.pio_in_emulated = emulator_pio_in_emulated,
.pio_out_emulated = emulator_pio_out_emulated,
.get_segment = emulator_get_segment,
.set_segment = emulator_set_segment,
.get_cached_segment_base = emulator_get_cached_segment_base,
.get_gdt = emulator_get_gdt,
.get_idt = emulator_get_idt,
.set_gdt = emulator_set_gdt,
.set_idt = emulator_set_idt,
.get_cr = emulator_get_cr,
.set_cr = emulator_set_cr,
.set_rflags = emulator_set_rflags,
.cpl = emulator_get_cpl,
.get_dr = emulator_get_dr,
.set_dr = emulator_set_dr,
.set_msr = emulator_set_msr,
.get_msr = emulator_get_msr,
.read_pmc = emulator_read_pmc,
.halt = emulator_halt,
.wbinvd = emulator_wbinvd,
.fix_hypercall = emulator_fix_hypercall,
.get_fpu = emulator_get_fpu,
.put_fpu = emulator_put_fpu,
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
{
u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
/*
	 * An "sti; sti" sequence only disables interrupts for the first
	 * instruction, since the STI interrupt shadow does not stack.  So
	 * if the last instruction, be it emulated or not, left the system
	 * with the STI shadow active, that instruction was itself an sti
	 * and we should not leave the shadow flag on.  The same goes for
	 * mov ss.
*/
if (!(int_shadow & mask))
kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
}
static void inject_emulated_exception(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
if (ctxt->exception.vector == PF_VECTOR)
kvm_propagate_fault(vcpu, &ctxt->exception);
else if (ctxt->exception.error_code_valid)
kvm_queue_exception_e(vcpu, ctxt->exception.vector,
ctxt->exception.error_code);
else
kvm_queue_exception(vcpu, ctxt->exception.vector);
}
static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
memset(&ctxt->twobyte, 0,
(void *)&ctxt->_regs - (void *)&ctxt->twobyte);
ctxt->fetch.start = 0;
ctxt->fetch.end = 0;
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.pos = 0;
ctxt->mem_read.end = 0;
}
static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int cs_db, cs_l;
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
ctxt->eflags = kvm_get_rflags(vcpu);
ctxt->eip = kvm_rip_read(vcpu);
ctxt->mode = (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
(ctxt->eflags & X86_EFLAGS_VM) ? X86EMUL_MODE_VM86 :
cs_l ? X86EMUL_MODE_PROT64 :
cs_db ? X86EMUL_MODE_PROT32 :
X86EMUL_MODE_PROT16;
ctxt->guest_mode = is_guest_mode(vcpu);
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
}
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
ctxt->op_bytes = 2;
ctxt->ad_bytes = 2;
ctxt->_eip = ctxt->eip + inc_eip;
ret = emulate_int_real(ctxt, irq);
if (ret != X86EMUL_CONTINUE)
return EMULATE_FAIL;
ctxt->eip = ctxt->_eip;
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
if (irq == NMI_VECTOR)
vcpu->arch.nmi_pending = 0;
else
vcpu->arch.interrupt.pending = false;
return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_inject_realmode_interrupt);
static int handle_emulation_failure(struct kvm_vcpu *vcpu)
{
int r = EMULATE_DONE;
++vcpu->stat.insn_emulation_fail;
trace_kvm_emulate_insn_failed(vcpu);
if (!is_guest_mode(vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
r = EMULATE_FAIL;
}
kvm_queue_exception(vcpu, UD_VECTOR);
return r;
}
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
bool write_fault_to_shadow_pgtable)
{
gpa_t gpa = cr2;
pfn_t pfn;
if (!vcpu->arch.mmu.direct_map) {
/*
* Write permission should be allowed since only
		 * write accesses need to be emulated.
*/
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
/*
		 * If the mapping is invalid in the guest, let the CPU
		 * retry it to generate the fault.
*/
if (gpa == UNMAPPED_GVA)
return true;
}
/*
* Do not retry the unhandleable instruction if it faults on the
	 * readonly host memory, otherwise it will go into an infinite loop:
* retry instruction -> write #PF -> emulation fail -> retry
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
/*
	 * If the instruction faulted on an error pfn, it cannot be fixed;
	 * report the error to userspace.
*/
if (is_error_noslot_pfn(pfn))
return false;
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
if (vcpu->arch.mmu.direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
spin_unlock(&vcpu->kvm->mmu_lock);
if (indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
/*
	 * If emulation was due to an access to a shadowed page table and
	 * it failed, try to unshadow the page and re-enter the guest to
	 * let the CPU execute the instruction.
*/
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
/*
	 * If the access faults on its own page table, it cannot be fixed
	 * by unprotecting the shadow page, and it should be reported to
	 * userspace.
*/
return !write_fault_to_shadow_pgtable;
}
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
unsigned long cr2, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
/*
	 * If the emulation is caused by a #PF and the faulting instruction
	 * does not write page tables, the VM-EXIT was caused by a
	 * write-protected shadow page; we can zap the shadow page and
	 * retry this instruction directly.
	 *
	 * Note: if the guest uses a non-page-table-modifying instruction
	 * on the PDE that points to the instruction, then we will unmap
	 * the instruction and go into an infinite loop.  So, we cache the
	 * last retried eip and the last fault address; if we meet the same
	 * eip and address again, we can break out of the potential
	 * infinite loop.
*/
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_RETRY))
return false;
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2,
write_fault_to_spt))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
	/*
	 * This is needed for the VMware backdoor interface to work, since
	 * it changes register values during the I/O operation.
	 */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
EXPORT_SYMBOL_GPL(x86_emulate_instruction);
int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port)
{
unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
size, port, &val, 1);
/* do not return to emulator after return from userspace */
vcpu->arch.pio.count = 0;
return ret;
}
EXPORT_SYMBOL_GPL(kvm_fast_pio_out);
static void tsc_bad(void *info)
{
__this_cpu_write(cpu_tsc_khz, 0);
}
static void tsc_khz_changed(void *data)
{
struct cpufreq_freqs *freq = data;
unsigned long khz = 0;
if (data)
khz = freq->new;
else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
khz = cpufreq_quick_get(raw_smp_processor_id());
if (!khz)
khz = tsc_khz;
__this_cpu_write(cpu_tsc_khz, khz);
}
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
void *data)
{
struct cpufreq_freqs *freq = data;
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i, send_ipi = 0;
/*
* We allow guests to temporarily run on slowing clocks,
* provided we notify them after, or to run on accelerating
* clocks, provided we notify them before. Thus time never
* goes backwards.
*
* However, we have a problem. We can't atomically update
* the frequency of a given CPU from this function; it is
* merely a notifier, which can be called from any CPU.
* Changing the TSC frequency at arbitrary points in time
* requires a recomputation of local variables related to
* the TSC for each VCPU. We must flag these local variables
* to be updated and be sure the update takes place with the
* new frequency before any guests proceed.
*
* Unfortunately, the combination of hotplug CPU and frequency
* change creates an intractable locking scenario; the order
* of when these callouts happen is undefined with respect to
* CPU hotplug, and they can race with each other. As such,
* merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is
* undefined; you can actually have a CPU frequency change take
* place in between the computation of X and the setting of the
* variable. To protect against this problem, all updates of
* the per_cpu tsc_khz variable are done in an interrupt
* protected IPI, and all callers wishing to update the value
* must wait for a synchronous IPI to complete (which is trivial
* if the caller is on the CPU already). This establishes the
* necessary total order on variable updates.
*
* Note that because a guest time update may take place
* anytime after the setting of the VCPU's request bit, the
* correct TSC value must be set before the request. However,
* to ensure the update actually makes it to any guest which
* starts running in hardware virtualization between the set
* and the acquisition of the spinlock, we must also ping the
* CPU after setting the request bit.
	 */
if (val == CPUFREQ_PRECHANGE && freq->old > freq->new)
return 0;
if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new)
return 0;
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (vcpu->cpu != freq->cpu)
continue;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != smp_processor_id())
send_ipi = 1;
}
}
raw_spin_unlock(&kvm_lock);
if (freq->old < freq->new && send_ipi) {
/*
		 * We upscale the frequency.  Must make sure the guest
		 * doesn't see old kvmclock values while running with
		 * the new frequency, otherwise we risk the guest seeing
		 * time go backwards.
*
* In case we update the frequency for another cpu
* (which might be in guest context) send an interrupt
* to kick the cpu out of guest context. Next time
* guest context is entered kvmclock will be updated,
* so the guest will not see stale values.
*/
smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
}
return 0;
}
static struct notifier_block kvmclock_cpufreq_notifier_block = {
.notifier_call = kvmclock_cpufreq_notifier
};
static int kvmclock_cpu_notifier(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
case CPU_ONLINE:
case CPU_DOWN_FAILED:
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
break;
case CPU_DOWN_PREPARE:
smp_call_function_single(cpu, tsc_bad, NULL, 1);
break;
}
return NOTIFY_OK;
}
static struct notifier_block kvmclock_cpu_notifier_block = {
.notifier_call = kvmclock_cpu_notifier,
.priority = -INT_MAX
};
static void kvm_timer_init(void)
{
int cpu;
max_tsc_khz = tsc_khz;
register_hotcpu_notifier(&kvmclock_cpu_notifier_block);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
struct cpufreq_policy policy;
memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
cpufreq_get_policy(&policy, cpu);
if (policy.cpuinfo.max_freq)
max_tsc_khz = policy.cpuinfo.max_freq;
put_cpu();
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
}
pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz);
for_each_online_cpu(cpu)
smp_call_function_single(cpu, tsc_khz_changed, NULL, 1);
}
static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
int kvm_is_in_guest(void)
{
return __this_cpu_read(current_vcpu) != NULL;
}
static int kvm_is_user_mode(void)
{
int user_mode = 3;
if (__this_cpu_read(current_vcpu))
user_mode = kvm_x86_ops->get_cpl(__this_cpu_read(current_vcpu));
return user_mode != 0;
}
static unsigned long kvm_get_guest_ip(void)
{
unsigned long ip = 0;
if (__this_cpu_read(current_vcpu))
ip = kvm_rip_read(__this_cpu_read(current_vcpu));
return ip;
}
static struct perf_guest_info_callbacks kvm_guest_cbs = {
.is_in_guest = kvm_is_in_guest,
.is_user_mode = kvm_is_user_mode,
.get_guest_ip = kvm_get_guest_ip,
};
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
{
__this_cpu_write(current_vcpu, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
{
__this_cpu_write(current_vcpu, NULL);
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
int maxphyaddr = boot_cpu_data.x86_phys_bits;
/*
	 * Set the reserved bits and the present bit of a paging-structure
	 * entry to generate a page fault with PFERR.RSV = 1.
*/
mask = ((1ull << (62 - maxphyaddr + 1)) - 1) << maxphyaddr;
mask |= 1ull;
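	/*
	 * For example, with maxphyaddr == 40 this sets bits 62:40 (the
	 * reserved bits) plus bit 0 (present), so a walk through such an
	 * SPTE faults with PFERR.RSV = 1 and is recognized as MMIO.
	 */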
#ifdef CONFIG_X86_64
/*
	 * If the reserved bit is not supported, clear the present bit to
	 * disable the MMIO page fault.
*/
if (maxphyaddr == 52)
mask &= ~1ull;
#endif
kvm_mmu_set_mmio_spte_mask(mask);
}
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
atomic_set(&kvm_guest_has_master_clock, 0);
raw_spin_unlock(&kvm_lock);
}
static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
/*
* Notification about pvclock gtod data update.
*/
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused,
void *priv)
{
struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
struct timekeeper *tk = priv;
update_pvclock_gtod(tk);
/* disable master clock if host does not trust, or does not
* use, TSC clocksource
*/
if (gtod->clock.vclock_mode != VCLOCK_TSC &&
atomic_read(&kvm_guest_has_master_clock) != 0)
queue_work(system_long_wq, &pvclock_gtod_work);
return 0;
}
static struct notifier_block pvclock_gtod_notifier = {
.notifier_call = pvclock_gtod_notify,
};
#endif
int kvm_arch_init(void *opaque)
{
int r;
struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
r = -EEXIST;
goto out;
}
if (!ops->cpu_has_kvm_support()) {
printk(KERN_ERR "kvm: no hardware support\n");
r = -EOPNOTSUPP;
goto out;
}
if (ops->disabled_by_bios()) {
printk(KERN_ERR "kvm: disabled by bios\n");
r = -EOPNOTSUPP;
goto out;
}
r = -ENOMEM;
shared_msrs = alloc_percpu(struct kvm_shared_msrs);
if (!shared_msrs) {
printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
goto out;
}
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
kvm_set_mmio_spte_mask();
kvm_init_msr_list();
kvm_x86_ops = ops;
kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
PT_DIRTY_MASK, PT64_NX_MASK, 0);
kvm_timer_init();
perf_register_guest_info_callbacks(&kvm_guest_cbs);
if (cpu_has_xsave)
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
kvm_lapic_init();
#ifdef CONFIG_X86_64
pvclock_gtod_register_notifier(&pvclock_gtod_notifier);
#endif
return 0;
out_free_percpu:
free_percpu(shared_msrs);
out:
return r;
}
void kvm_arch_exit(void)
{
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block);
#ifdef CONFIG_X86_64
pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
#endif
kvm_x86_ops = NULL;
kvm_mmu_module_exit();
free_percpu(shared_msrs);
}
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.halt_exits;
if (irqchip_in_kernel(vcpu->kvm)) {
vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
return 1;
} else {
vcpu->run->exit_reason = KVM_EXIT_HLT;
return 0;
}
}
EXPORT_SYMBOL_GPL(kvm_emulate_halt);
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
u64 param, ingpa, outgpa, ret;
uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
bool fast, longmode;
int cs_db, cs_l;
/*
	 * A hypercall generates #UD from non-zero CPL or real mode,
	 * per the Hyper-V spec.
*/
if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
longmode = is_long_mode(vcpu) && cs_l == 1;
if (!longmode) {
param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
(kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
}
#ifdef CONFIG_X86_64
else {
param = kvm_register_read(vcpu, VCPU_REGS_RCX);
ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
}
#endif
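	/*
	 * Decode the hypercall input value: per the Hyper-V spec, the low
	 * 16 bits hold the call code, bit 16 is the "fast" flag, and the
	 * rep count and rep start index sit in the upper half (masked to
	 * 12 bits each here).
	 */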
code = param & 0xffff;
fast = (param >> 16) & 0x1;
rep_cnt = (param >> 32) & 0xfff;
rep_idx = (param >> 48) & 0xfff;
trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
switch (code) {
case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
kvm_vcpu_on_spin(vcpu);
break;
default:
res = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
}
ret = res | (((u64)rep_done & 0xfff) << 32);
if (longmode) {
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
} else {
kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
}
return 1;
}
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
int r = 1;
if (kvm_hv_hypercall_enabled(vcpu->kvm))
return kvm_hv_hypercall(vcpu);
nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
trace_kvm_hypercall(nr, a0, a1, a2, a3);
if (!is_long_mode(vcpu)) {
nr &= 0xFFFFFFFF;
a0 &= 0xFFFFFFFF;
a1 &= 0xFFFFFFFF;
a2 &= 0xFFFFFFFF;
a3 &= 0xFFFFFFFF;
}
if (kvm_x86_ops->get_cpl(vcpu) != 0) {
ret = -KVM_EPERM;
goto out;
}
switch (nr) {
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
default:
ret = -KVM_ENOSYS;
break;
}
out:
kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
++vcpu->stat.hypercalls;
return r;
}
EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
char instruction[3];
unsigned long rip = kvm_rip_read(vcpu);
/*
	 * Blow out the MMU so that no other VCPU keeps an active mapping,
	 * ensuring that the updated hypercall appears atomically across
	 * all VCPUs.
*/
kvm_mmu_zap_all(vcpu->kvm);
kvm_x86_ops->patch_hypercall(vcpu, instruction);
return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
}
/*
* Check if userspace requested an interrupt window, and that the
* interrupt window is open.
*
* No need to exit to userspace if we already have an interrupt queued.
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
vcpu->run->request_interrupt_window &&
kvm_arch_interrupt_allowed(vcpu));
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
{
struct kvm_run *kvm_run = vcpu->run;
kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
kvm_run->cr8 = kvm_get_cr8(vcpu);
kvm_run->apic_base = kvm_get_apic_base(vcpu);
if (irqchip_in_kernel(vcpu->kvm))
kvm_run->ready_for_interrupt_injection = 1;
else
kvm_run->ready_for_interrupt_injection =
kvm_arch_interrupt_allowed(vcpu) &&
!kvm_cpu_has_interrupt(vcpu) &&
!kvm_event_needs_reinjection(vcpu);
}
static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
}
static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
if (!kvm_x86_ops->update_cr8_intercept)
return;
if (!vcpu->arch.apic)
return;
if (!vcpu->arch.apic->vapic_addr)
max_irr = kvm_lapic_find_highest_irr(vcpu);
else
max_irr = -1;
if (max_irr != -1)
max_irr >>= 4;
tpr = kvm_lapic_get_cr8(vcpu);
kvm_x86_ops->update_cr8_intercept(vcpu, tpr, max_irr);
}
static void inject_pending_event(struct kvm_vcpu *vcpu)
{
/* try to reinject previous events if any */
if (vcpu->arch.exception.pending) {
trace_kvm_inj_exception(vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code);
kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
vcpu->arch.exception.has_error_code,
vcpu->arch.exception.error_code,
vcpu->arch.exception.reinject);
return;
}
if (vcpu->arch.nmi_injected) {
kvm_x86_ops->set_nmi(vcpu);
return;
}
if (vcpu->arch.interrupt.pending) {
kvm_x86_ops->set_irq(vcpu);
return;
}
/* try to inject new event if pending */
if (vcpu->arch.nmi_pending) {
if (kvm_x86_ops->nmi_allowed(vcpu)) {
--vcpu->arch.nmi_pending;
vcpu->arch.nmi_injected = true;
kvm_x86_ops->set_nmi(vcpu);
}
} else if (kvm_cpu_has_injectable_intr(vcpu)) {
if (kvm_x86_ops->interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
false);
kvm_x86_ops->set_irq(vcpu);
}
}
}
static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
!vcpu->guest_xcr0_loaded) {
/* kvm_set_xcr() also depends on this */
xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
vcpu->guest_xcr0_loaded = 1;
}
}
static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_xcr0_loaded) {
if (vcpu->arch.xcr0 != host_xcr0)
xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0);
vcpu->guest_xcr0_loaded = 0;
}
}
static void process_nmi(struct kvm_vcpu *vcpu)
{
unsigned limit = 2;
/*
* x86 is limited to one NMI running, and one NMI pending after it.
* If an NMI is already in progress, limit further NMIs to just one.
* Otherwise, allow two (and we'll inject the first one immediately).
*/
if (kvm_x86_ops->get_nmi_mask(vcpu) || vcpu->arch.nmi_injected)
limit = 1;
vcpu->arch.nmi_pending += atomic_xchg(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = min(vcpu->arch.nmi_pending, limit);
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
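/*
 * A minimal sketch (excluded from the build) of the clamp above: with an
 * NMI already running or still marked injected, at most one more NMI may
 * stay pending; otherwise up to two are kept and the first is injected
 * immediately. clamp_nmi_pending() is a hypothetical helper.
 */
#if 0
static unsigned clamp_nmi_pending(unsigned pending, unsigned queued,
				  int nmi_in_progress)
{
	unsigned limit = nmi_in_progress ? 1 : 2;

	pending += queued;
	return pending < limit ? pending : limit;
}
/* clamp_nmi_pending(0, 5, 0) == 2;  clamp_nmi_pending(1, 3, 1) == 1 */
#endif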
static void kvm_gen_update_masterclock(struct kvm *kvm)
{
#ifdef CONFIG_X86_64
int i;
struct kvm_vcpu *vcpu;
struct kvm_arch *ka = &kvm->arch;
spin_lock(&ka->pvclock_gtod_sync_lock);
kvm_make_mclock_inprogress_request(kvm);
/* no guest entries from this point */
pvclock_update_vm_gtod_copy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
/* guest entries allowed */
kvm_for_each_vcpu(i, vcpu, kvm)
clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
spin_unlock(&ka->pvclock_gtod_sync_lock);
#endif
}
static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
{
u64 eoi_exit_bitmap[4];
memset(eoi_exit_bitmap, 0, 32);
kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = 0;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu))
kvm_gen_update_masterclock(vcpu->kvm);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
goto out;
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_x86_ops->tlb_flush(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
vcpu->fpu_active = 0;
kvm_x86_ops->fpu_deactivate(vcpu);
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
vcpu->arch.apf.halted = true;
r = 1;
goto out;
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
req_immediate_exit =
kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
if (kvm_check_request(KVM_REQ_EOIBITMAP, vcpu))
update_eoi_exitmap(vcpu);
}
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
/* enable NMI/IRQ window open exits if needed */
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
/*
* Update architecture specific hints for APIC
* virtual interrupt delivery.
*/
if (kvm_x86_ops->hwapic_irr_update)
kvm_x86_ops->hwapic_irr_update(vcpu,
kvm_lapic_find_highest_irr(vcpu));
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
}
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto cancel_injection;
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
vcpu->mode = IN_GUEST_MODE;
/* We should set ->mode before checking ->requests;
* see the comment in make_all_cpus_request().
*/
smp_mb();
local_irq_disable();
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
r = 1;
goto cancel_injection;
}
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
}
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu);
/*
* If the guest has used debug registers, at least dr7
* will be disabled while returning to the host.
* If we don't have active breakpoints in the host, we don't
* care about the messed up debug address registers. But if
* we have some of them active, restore the old state.
*/
if (hw_breakpoint_active())
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu,
native_read_tsc());
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
++vcpu->stat.exits;
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
if (unlikely(vcpu->arch.tsc_always_catchup))
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->arch.apic_attention)
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
return r;
cancel_injection:
kvm_x86_ops->cancel_injection(vcpu);
if (unlikely(vcpu->arch.apic_attention))
kvm_lapic_sync_from_vapic(vcpu);
out:
return r;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
int r;
struct kvm *kvm = vcpu->kvm;
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
pr_debug("vcpu %d received sipi with vector # %x\n",
vcpu->vcpu_id, vcpu->arch.sipi_vector);
kvm_lapic_reset(vcpu);
r = kvm_vcpu_reset(vcpu);
if (r)
return r;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
r = vapic_enter(vcpu);
if (r) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
return r;
}
r = 1;
while (r > 0) {
if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
r = vcpu_enter_guest(vcpu);
else {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
switch (vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
/* fall through */
case KVM_MP_STATE_RUNNABLE:
vcpu->arch.apf.halted = false;
break;
case KVM_MP_STATE_SIPI_RECEIVED:
default:
r = -EINTR;
break;
}
}
}
if (r <= 0)
break;
clear_bit(KVM_REQ_PENDING_TIMER, &vcpu->requests);
if (kvm_cpu_has_pending_timer(vcpu))
kvm_inject_pending_timer_irqs(vcpu);
if (dm_request_for_irq_injection(vcpu)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.request_irq_exits;
}
kvm_check_async_pf_completion(vcpu);
if (signal_pending(current)) {
r = -EINTR;
vcpu->run->exit_reason = KVM_EXIT_INTR;
++vcpu->stat.signal_exits;
}
if (need_resched()) {
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_resched(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
}
}
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
vapic_exit(vcpu);
return r;
}
static inline int complete_emulated_io(struct kvm_vcpu *vcpu)
{
int r;
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (r != EMULATE_DONE)
return 0;
return 1;
}
static int complete_emulated_pio(struct kvm_vcpu *vcpu)
{
BUG_ON(!vcpu->arch.pio.count);
return complete_emulated_io(vcpu);
}
/*
* Implements the following, as a state machine:
*
* read:
* for each fragment
* for each mmio piece in the fragment
* write gpa, len
* exit
* copy data
* execute insn
*
* write:
* for each fragment
* for each mmio piece in the fragment
* write gpa, len
* copy data
* exit
*/
static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
struct kvm_mmio_fragment *frag;
unsigned len;
BUG_ON(!vcpu->mmio_needed);
/* Complete previous fragment */
frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
len = min(8u, frag->len);
if (!vcpu->mmio_is_write)
memcpy(frag->data, run->mmio.data, len);
if (frag->len <= 8) {
/* Switch to the next fragment. */
frag++;
vcpu->mmio_cur_fragment++;
} else {
/* Go forward to the next mmio piece. */
frag->data += len;
frag->gpa += len;
frag->len -= len;
}
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
if (vcpu->mmio_is_write)
return 1;
vcpu->mmio_read_completed = 1;
return complete_emulated_io(vcpu);
}
run->exit_reason = KVM_EXIT_MMIO;
run->mmio.phys_addr = frag->gpa;
if (vcpu->mmio_is_write)
memcpy(run->mmio.data, frag->data, min(8u, frag->len));
run->mmio.len = min(8u, frag->len);
run->mmio.is_write = vcpu->mmio_is_write;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
return 0;
}
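/*
 * A standalone sketch (excluded from the build) of the fragment walk
 * implemented by the state machine above: a fragment longer than 8 bytes
 * is carried to userspace in 8-byte pieces across repeated exits,
 * advancing gpa/len each time, until the last piece fits. The struct and
 * values below are hypothetical illustrations.
 */
#if 0
#include <stdio.h>

struct frag_sketch {
	unsigned long gpa;
	unsigned len;
};

int main(void)
{
	struct frag_sketch frag = { .gpa = 0xfee00000UL, .len = 20 };

	while (frag.len > 8) {
		printf("exit: gpa=%#lx len=8\n", frag.gpa);
		frag.gpa += 8;
		frag.len -= 8;
	}
	printf("exit: gpa=%#lx len=%u (last piece)\n", frag.gpa, frag.len);
	return 0;	/* prints pieces of 8, 8 and 4 bytes */
}
#endif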
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int r;
sigset_t sigsaved;
if (!tsk_used_math(current) && init_fpu(current))
return -ENOMEM;
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
r = -EAGAIN;
goto out;
}
/* re-sync apic's tpr */
if (!irqchip_in_kernel(vcpu->kvm)) {
if (kvm_set_cr8(vcpu, kvm_run->cr8) != 0) {
r = -EINVAL;
goto out;
}
}
if (unlikely(vcpu->arch.complete_userspace_io)) {
int (*cui)(struct kvm_vcpu *) = vcpu->arch.complete_userspace_io;
vcpu->arch.complete_userspace_io = NULL;
r = cui(vcpu);
if (r <= 0)
goto out;
} else
WARN_ON(vcpu->arch.pio.count || vcpu->mmio_needed);
r = __vcpu_run(vcpu);
out:
post_kvm_run_save(vcpu);
if (vcpu->sigset_active)
sigprocmask(SIG_SETMASK, &sigsaved, NULL);
return r;
}
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
/*
* We are here if userspace calls get_regs() in the middle of
* instruction emulation. The register state needs to be copied
* back from the emulation context to the vcpu. Userspace
* shouldn't usually do that, but some badly designed PV devices
* (the vmware backdoor interface) need this to work.
*/
emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
}
regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
regs->rbx = kvm_register_read(vcpu, VCPU_REGS_RBX);
regs->rcx = kvm_register_read(vcpu, VCPU_REGS_RCX);
regs->rdx = kvm_register_read(vcpu, VCPU_REGS_RDX);
regs->rsi = kvm_register_read(vcpu, VCPU_REGS_RSI);
regs->rdi = kvm_register_read(vcpu, VCPU_REGS_RDI);
regs->rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
regs->rbp = kvm_register_read(vcpu, VCPU_REGS_RBP);
#ifdef CONFIG_X86_64
regs->r8 = kvm_register_read(vcpu, VCPU_REGS_R8);
regs->r9 = kvm_register_read(vcpu, VCPU_REGS_R9);
regs->r10 = kvm_register_read(vcpu, VCPU_REGS_R10);
regs->r11 = kvm_register_read(vcpu, VCPU_REGS_R11);
regs->r12 = kvm_register_read(vcpu, VCPU_REGS_R12);
regs->r13 = kvm_register_read(vcpu, VCPU_REGS_R13);
regs->r14 = kvm_register_read(vcpu, VCPU_REGS_R14);
regs->r15 = kvm_register_read(vcpu, VCPU_REGS_R15);
#endif
regs->rip = kvm_rip_read(vcpu);
regs->rflags = kvm_get_rflags(vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_register_write(vcpu, VCPU_REGS_RAX, regs->rax);
kvm_register_write(vcpu, VCPU_REGS_RBX, regs->rbx);
kvm_register_write(vcpu, VCPU_REGS_RCX, regs->rcx);
kvm_register_write(vcpu, VCPU_REGS_RDX, regs->rdx);
kvm_register_write(vcpu, VCPU_REGS_RSI, regs->rsi);
kvm_register_write(vcpu, VCPU_REGS_RDI, regs->rdi);
kvm_register_write(vcpu, VCPU_REGS_RSP, regs->rsp);
kvm_register_write(vcpu, VCPU_REGS_RBP, regs->rbp);
#ifdef CONFIG_X86_64
kvm_register_write(vcpu, VCPU_REGS_R8, regs->r8);
kvm_register_write(vcpu, VCPU_REGS_R9, regs->r9);
kvm_register_write(vcpu, VCPU_REGS_R10, regs->r10);
kvm_register_write(vcpu, VCPU_REGS_R11, regs->r11);
kvm_register_write(vcpu, VCPU_REGS_R12, regs->r12);
kvm_register_write(vcpu, VCPU_REGS_R13, regs->r13);
kvm_register_write(vcpu, VCPU_REGS_R14, regs->r14);
kvm_register_write(vcpu, VCPU_REGS_R15, regs->r15);
#endif
kvm_rip_write(vcpu, regs->rip);
kvm_set_rflags(vcpu, regs->rflags);
vcpu->arch.exception.pending = false;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;
kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
*db = cs.db;
*l = cs.l;
}
EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
struct desc_ptr dt;
kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_get_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_get_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_get_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_get_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
kvm_x86_ops->get_idt(vcpu, &dt);
sregs->idt.limit = dt.size;
sregs->idt.base = dt.address;
kvm_x86_ops->get_gdt(vcpu, &dt);
sregs->gdt.limit = dt.size;
sregs->gdt.base = dt.address;
sregs->cr0 = kvm_read_cr0(vcpu);
sregs->cr2 = vcpu->arch.cr2;
sregs->cr3 = kvm_read_cr3(vcpu);
sregs->cr4 = kvm_read_cr4(vcpu);
sregs->cr8 = kvm_get_cr8(vcpu);
sregs->efer = vcpu->arch.efer;
sregs->apic_base = kvm_get_apic_base(vcpu);
memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
if (vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft)
set_bit(vcpu->arch.interrupt.nr,
(unsigned long *)sregs->interrupt_bitmap);
return 0;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
mp_state->mp_state = vcpu->arch.mp_state;
return 0;
}
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
vcpu->arch.mp_state = mp_state->mp_state;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
int reason, bool has_error_code, u32 error_code)
{
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
int ret;
init_emulate_ctxt(vcpu);
ret = emulator_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (ret)
return EMULATE_FAIL;
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
struct kvm_sregs *sregs)
{
int mmu_reset_needed = 0;
int pending_vec, max_bits, idx;
struct desc_ptr dt;
if (!guest_cpuid_has_xsave(vcpu) && (sregs->cr4 & X86_CR4_OSXSAVE))
return -EINVAL;
dt.size = sregs->idt.limit;
dt.address = sregs->idt.base;
kvm_x86_ops->set_idt(vcpu, &dt);
dt.size = sregs->gdt.limit;
dt.address = sregs->gdt.base;
kvm_x86_ops->set_gdt(vcpu, &dt);
vcpu->arch.cr2 = sregs->cr2;
mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3;
vcpu->arch.cr3 = sregs->cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
kvm_set_cr8(vcpu, sregs->cr8);
mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
kvm_x86_ops->set_efer(vcpu, sregs->efer);
kvm_set_apic_base(vcpu, sregs->apic_base);
mmu_reset_needed |= kvm_read_cr0(vcpu) != sregs->cr0;
kvm_x86_ops->set_cr0(vcpu, sregs->cr0);
vcpu->arch.cr0 = sregs->cr0;
mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4;
kvm_x86_ops->set_cr4(vcpu, sregs->cr4);
if (sregs->cr4 & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
if (!is_long_mode(vcpu) && is_pae(vcpu)) {
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
mmu_reset_needed = 1;
}
srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (mmu_reset_needed)
kvm_mmu_reset_context(vcpu);
max_bits = KVM_NR_INTERRUPTS;
pending_vec = find_first_bit(
(const unsigned long *)sregs->interrupt_bitmap, max_bits);
if (pending_vec < max_bits) {
kvm_queue_interrupt(vcpu, pending_vec, false);
pr_debug("Set back pending irq %d\n", pending_vec);
}
kvm_set_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
kvm_set_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
kvm_set_segment(vcpu, &sregs->es, VCPU_SREG_ES);
kvm_set_segment(vcpu, &sregs->fs, VCPU_SREG_FS);
kvm_set_segment(vcpu, &sregs->gs, VCPU_SREG_GS);
kvm_set_segment(vcpu, &sregs->ss, VCPU_SREG_SS);
kvm_set_segment(vcpu, &sregs->tr, VCPU_SREG_TR);
kvm_set_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
update_cr8_intercept(vcpu);
/* Older userspace won't unhalt the vcpu on reset. */
if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
!is_protmode(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
unsigned long rflags;
int i, r;
if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
r = -EBUSY;
if (vcpu->arch.exception.pending)
goto out;
if (dbg->control & KVM_GUESTDBG_INJECT_DB)
kvm_queue_exception(vcpu, DB_VECTOR);
else
kvm_queue_exception(vcpu, BP_VECTOR);
}
/*
* Read rflags while potentially injected trace flags are still
* filtered out.
*/
rflags = kvm_get_rflags(vcpu);
vcpu->guest_debug = dbg->control;
if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
vcpu->guest_debug = 0;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
for (i = 0; i < KVM_NR_DB_REGS; ++i)
vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
} else {
for (i = 0; i < KVM_NR_DB_REGS; i++)
vcpu->arch.eff_db[i] = vcpu->arch.db[i];
}
kvm_update_dr7(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
get_segment_base(vcpu, VCPU_SREG_CS);
/*
* Trigger an rflags update that will inject or remove the trace
* flags.
*/
kvm_set_rflags(vcpu, rflags);
kvm_x86_ops->update_db_bp_intercept(vcpu);
r = 0;
out:
return r;
}
/*
* Translate a guest virtual address to a guest physical address.
*/
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
struct kvm_translation *tr)
{
unsigned long vaddr = tr->linear_address;
gpa_t gpa;
int idx;
idx = srcu_read_lock(&vcpu->kvm->srcu);
gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
tr->physical_address = gpa;
tr->valid = gpa != UNMAPPED_GVA;
tr->writeable = 1;
tr->usermode = 0;
return 0;
}
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct i387_fxsave_struct *fxsave =
&vcpu->arch.guest_fpu.state->fxsave;
memcpy(fpu->fpr, fxsave->st_space, 128);
fpu->fcw = fxsave->cwd;
fpu->fsw = fxsave->swd;
fpu->ftwx = fxsave->twd;
fpu->last_opcode = fxsave->fop;
fpu->last_ip = fxsave->rip;
fpu->last_dp = fxsave->rdp;
memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
struct i387_fxsave_struct *fxsave =
&vcpu->arch.guest_fpu.state->fxsave;
memcpy(fxsave->st_space, fpu->fpr, 128);
fxsave->cwd = fpu->fcw;
fxsave->swd = fpu->fsw;
fxsave->twd = fpu->ftwx;
fxsave->fop = fpu->last_opcode;
fxsave->rip = fpu->last_ip;
fxsave->rdp = fpu->last_dp;
memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
return 0;
}
int fx_init(struct kvm_vcpu *vcpu)
{
int err;
err = fpu_alloc(&vcpu->arch.guest_fpu);
if (err)
return err;
fpu_finit(&vcpu->arch.guest_fpu);
/*
* Ensure guest xcr0 is valid for loading
*/
vcpu->arch.xcr0 = XSTATE_FP;
vcpu->arch.cr0 |= X86_CR0_ET;
return 0;
}
EXPORT_SYMBOL_GPL(fx_init);
static void fx_free(struct kvm_vcpu *vcpu)
{
fpu_free(&vcpu->arch.guest_fpu);
}
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
if (vcpu->guest_fpu_loaded)
return;
/*
* Restore all possible states of the guest, and assume the host
* would use all available bits. The guest xcr0 will be loaded
* later.
*/
kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
fpu_restore_checking(&vcpu->arch.guest_fpu);
trace_kvm_fpu(1);
}
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
kvm_put_guest_xcr0(vcpu);
if (!vcpu->guest_fpu_loaded)
return;
vcpu->guest_fpu_loaded = 0;
fpu_save_init(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
trace_kvm_fpu(0);
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
kvmclock_reset(vcpu);
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
unsigned int id)
{
if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
printk_once(KERN_WARNING
"kvm: SMP vm created on host with unstable TSC; "
"guest TSC will not be reliable\n");
return kvm_x86_ops->vcpu_create(kvm, id);
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.mtrr_state.have_fixed = 1;
r = vcpu_load(vcpu);
if (r)
return r;
r = kvm_vcpu_reset(vcpu);
if (r == 0)
r = kvm_mmu_setup(vcpu);
vcpu_put(vcpu);
return r;
}
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
int r;
struct msr_data msr;
r = vcpu_load(vcpu);
if (r)
return r;
msr.data = 0x0;
msr.index = MSR_IA32_TSC;
msr.host_initiated = true;
kvm_write_tsc(vcpu, &msr);
vcpu_put(vcpu);
return r;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
int r;
vcpu->arch.apf.msr_val = 0;
r = vcpu_load(vcpu);
BUG_ON(r);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
fx_free(vcpu);
kvm_x86_ops->vcpu_free(vcpu);
}
static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
kvm_pmu_reset(vcpu);
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
return kvm_x86_ops->vcpu_reset(vcpu);
}
int kvm_arch_hardware_enable(void *garbage)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
int ret;
u64 local_tsc;
u64 max_tsc = 0;
bool stable, backwards_tsc = false;
kvm_shared_msr_cpu_online();
ret = kvm_x86_ops->hardware_enable(garbage);
if (ret != 0)
return ret;
local_tsc = native_read_tsc();
stable = !check_tsc_unstable();
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
if (!stable && vcpu->cpu == smp_processor_id())
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
if (stable && vcpu->arch.last_host_tsc > local_tsc) {
backwards_tsc = true;
if (vcpu->arch.last_host_tsc > max_tsc)
max_tsc = vcpu->arch.last_host_tsc;
}
}
}
/*
* Sometimes, even reliable TSCs go backwards. This happens on
* platforms that reset TSC during suspend or hibernate actions, but
* maintain synchronization. We must compensate. Fortunately, we can
* detect that condition here, which happens early in CPU bringup,
* before any KVM threads can be running. Unfortunately, we can't
* bring the TSCs fully up to date with real time, as we aren't yet far
* enough into CPU bringup that we know how much real time has actually
* elapsed; our helper function, get_kernel_ns() will be using boot
* variables that haven't been updated yet.
*
* So we simply find the maximum observed TSC above, then record the
* adjustment to TSC in each VCPU. When the VCPU later gets loaded,
* the adjustment will be applied. Note that we accumulate
* adjustments, in case multiple suspend cycles happen before some VCPU
* gets a chance to run again. In the event that no KVM threads get a
* chance to run, we will miss the entire elapsed period, as we'll have
* reset last_host_tsc, so VCPUs will not have the TSC adjusted and may
* lose cycle time. This isn't too big a deal, since the loss will be
* uniform across all VCPUs (not to mention the scenario is extremely
* unlikely). It is possible that a second hibernate recovery happens
* much faster than a first, causing the observed TSC here to be
* smaller; this would require additional padding adjustment, which is
* why we set last_host_tsc to the local tsc observed here.
*
* N.B. - this code below runs only on platforms with reliable TSC,
* as that is the only way backwards_tsc is set above. Also note
* that this runs for ALL vcpus, which is not a bug; all VCPUs should
* have the same delta_cyc adjustment applied if backwards_tsc
* is detected. Note further, this adjustment is only done once,
* as we reset last_host_tsc on all VCPUs to stop this from being
* called multiple times (one for each physical CPU bringup).
*
* Platforms with unreliable TSCs don't have to deal with this, they
* will be compensated by the logic in vcpu_load, which sets the TSC to
* catchup mode. This will catchup all VCPUs to real time, but cannot
* guarantee that they stay in perfect synchronization.
*/
if (backwards_tsc) {
u64 delta_cyc = max_tsc - local_tsc;
list_for_each_entry(kvm, &vm_list, vm_list) {
kvm_for_each_vcpu(i, vcpu, kvm) {
vcpu->arch.tsc_offset_adjustment += delta_cyc;
vcpu->arch.last_host_tsc = local_tsc;
set_bit(KVM_REQ_MASTERCLOCK_UPDATE,
&vcpu->requests);
}
/*
* We have to disable TSC offset matching: if you were
* booting a VM while issuing an S4 host suspend, you
* might hit problems. Solving this issue is left as an
* exercise to the reader.
*/
kvm->arch.last_tsc_nsec = 0;
kvm->arch.last_tsc_write = 0;
}
}
return 0;
}
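/*
 * Worked example of the compensation above (numbers hypothetical):
 * suppose a VCPU recorded last_host_tsc = 9,000,000 before an S4
 * suspend, and on resume this CPU reads local_tsc = 1,000,000. The TSC
 * is stable, so backwards_tsc is set with max_tsc = 9,000,000, and
 * every VCPU gets tsc_offset_adjustment += 8,000,000. A second suspend
 * before any VCPU runs simply accumulates a further delta on top of
 * the first.
 */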
void kvm_arch_hardware_disable(void *garbage)
{
kvm_x86_ops->hardware_disable(garbage);
drop_user_return_notifiers(garbage);
}
int kvm_arch_hardware_setup(void)
{
return kvm_x86_ops->hardware_setup();
}
void kvm_arch_hardware_unsetup(void)
{
kvm_x86_ops->hardware_unsetup();
}
void kvm_arch_check_processor_compat(void *rtn)
{
kvm_x86_ops->check_processor_compatibility(rtn);
}
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
{
return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
}
struct static_key kvm_no_apic_vcpu __read_mostly;
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page;
struct kvm *kvm;
int r;
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
else
vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
r = -ENOMEM;
goto fail;
}
vcpu->arch.pio_data = page_address(page);
kvm_set_tsc_khz(vcpu, max_tsc_khz);
r = kvm_mmu_create(vcpu);
if (r < 0)
goto fail_free_pio_data;
if (irqchip_in_kernel(kvm)) {
r = kvm_create_lapic(vcpu);
if (r < 0)
goto fail_mmu_destroy;
} else
static_key_slow_inc(&kvm_no_apic_vcpu);
vcpu->arch.mce_banks = kzalloc(KVM_MAX_MCE_BANKS * sizeof(u64) * 4,
GFP_KERNEL);
if (!vcpu->arch.mce_banks) {
r = -ENOMEM;
goto fail_free_lapic;
}
vcpu->arch.mcg_cap = KVM_MAX_MCE_BANKS;
if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
goto fail_free_mce_banks;
r = fx_init(vcpu);
if (r)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
return 0;
fail_free_wbinvd_dirty_mask:
free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
fail_free_mce_banks:
kfree(vcpu->arch.mce_banks);
fail_free_lapic:
kvm_free_lapic(vcpu);
fail_mmu_destroy:
kvm_mmu_destroy(vcpu);
fail_free_pio_data:
free_page((unsigned long)vcpu->arch.pio_data);
fail:
return r;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
int idx;
kvm_pmu_destroy(vcpu);
kfree(vcpu->arch.mce_banks);
kvm_free_lapic(vcpu);
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_mmu_destroy(vcpu);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
free_page((unsigned long)vcpu->arch.pio_data);
if (!irqchip_in_kernel(vcpu->kvm))
static_key_slow_dec(&kvm_no_apic_vcpu);
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
if (type)
return -EINVAL;
INIT_LIST_HEAD(&kvm->arch.active_mmu_pages);
INIT_LIST_HEAD(&kvm->arch.assigned_dev_head);
/* Reserve bit 0 of irq_sources_bitmap for userspace irq source */
set_bit(KVM_USERSPACE_IRQ_SOURCE_ID, &kvm->arch.irq_sources_bitmap);
/* Reserve bit 1 of irq_sources_bitmap for irqfd-resampler */
set_bit(KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
&kvm->arch.irq_sources_bitmap);
raw_spin_lock_init(&kvm->arch.tsc_write_lock);
mutex_init(&kvm->arch.apic_map_lock);
spin_lock_init(&kvm->arch.pvclock_gtod_sync_lock);
pvclock_update_vm_gtod_copy(kvm);
return 0;
}
static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
{
int r;
r = vcpu_load(vcpu);
BUG_ON(r);
kvm_mmu_unload(vcpu);
vcpu_put(vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
unsigned int i;
struct kvm_vcpu *vcpu;
/*
* Unpin any mmu pages first.
*/
kvm_for_each_vcpu(i, vcpu, kvm) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_unload_vcpu_mmu(vcpu);
}
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_free(vcpu);
mutex_lock(&kvm->lock);
for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
kvm->vcpus[i] = NULL;
atomic_set(&kvm->online_vcpus, 0);
mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
kvm_free_all_assigned_devices(kvm);
kvm_free_pit(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_iommu_unmap_guest(kvm);
kfree(kvm->arch.vpic);
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
if (kvm->arch.apic_access_page)
put_page(kvm->arch.apic_access_page);
if (kvm->arch.ept_identity_pagetable)
put_page(kvm->arch.ept_identity_pagetable);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
if (!dont || free->arch.rmap[i] != dont->arch.rmap[i]) {
kvm_kvfree(free->arch.rmap[i]);
free->arch.rmap[i] = NULL;
}
if (i == 0)
continue;
if (!dont || free->arch.lpage_info[i - 1] !=
dont->arch.lpage_info[i - 1]) {
kvm_kvfree(free->arch.lpage_info[i - 1]);
free->arch.lpage_info[i - 1] = NULL;
}
}
}
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
unsigned long ugfn;
int lpages;
int level = i + 1;
lpages = gfn_to_index(slot->base_gfn + npages - 1,
slot->base_gfn, level) + 1;
slot->arch.rmap[i] =
kvm_kvzalloc(lpages * sizeof(*slot->arch.rmap[i]));
if (!slot->arch.rmap[i])
goto out_free;
if (i == 0)
continue;
slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
sizeof(*slot->arch.lpage_info[i - 1]));
if (!slot->arch.lpage_info[i - 1])
goto out_free;
if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
slot->arch.lpage_info[i - 1][0].write_count = 1;
if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
ugfn = slot->userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!kvm_largepages_enabled()) {
unsigned long j;
for (j = 0; j < lpages; ++j)
slot->arch.lpage_info[i - 1][j].write_count = 1;
}
}
return 0;
out_free:
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
kvm_kvfree(slot->arch.rmap[i]);
slot->arch.rmap[i] = NULL;
if (i == 0)
continue;
kvm_kvfree(slot->arch.lpage_info[i - 1]);
slot->arch.lpage_info[i - 1] = NULL;
}
return -ENOMEM;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
bool user_alloc)
{
int npages = memslot->npages;
/*
* Only private memory slots need to be mapped here since
* KVM_SET_MEMORY_REGION ioctl is no longer supported.
*/
if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
unsigned long userspace_addr;
/*
* MAP_SHARED to prevent internal slot pages from being moved
* by fork()/COW.
*/
userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANONYMOUS, 0);
if (IS_ERR((void *)userspace_addr))
return PTR_ERR((void *)userspace_addr);
memslot->userspace_addr = userspace_addr;
}
return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
bool user_alloc)
{
int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
int ret;
ret = vm_munmap(old.userspace_addr,
old.npages * PAGE_SIZE);
if (ret < 0)
printk(KERN_WARNING
"kvm_vm_ioctl_set_memory_region: "
"failed to munmap memory\n");
}
if (!kvm->arch.n_requested_mmu_pages)
nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
if (nr_mmu_pages)
kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
/*
* Write protect all pages for dirty logging.
* Existing largepage mappings are destroyed here and new ones will
* not be created until the end of the logging.
*/
if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
/*
* If memory slot is created, or moved, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm);
}
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
kvm_mmu_zap_all(kvm);
kvm_reload_remote_mmus(kvm);
}
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
kvm_arch_flush_shadow_all(kvm);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
|| atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
return kvm_x86_ops->interrupt_allowed(vcpu);
}
bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
{
unsigned long current_rip = kvm_rip_read(vcpu) +
get_segment_base(vcpu, VCPU_SREG_CS);
return current_rip == linear_rip;
}
EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags;
rflags = kvm_x86_ops->get_rflags(vcpu);
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
rflags &= ~X86_EFLAGS_TF;
return rflags;
}
EXPORT_SYMBOL_GPL(kvm_get_rflags);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
rflags |= X86_EFLAGS_TF;
kvm_x86_ops->set_rflags(vcpu, rflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
EXPORT_SYMBOL_GPL(kvm_set_rflags);
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
int r;
if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
is_error_page(work->page))
return;
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
return;
if (!vcpu->arch.mmu.direct_map &&
work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
return;
vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
{
return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));
}
static inline u32 kvm_async_pf_next_probe(u32 key)
{
return (key + 1) & (roundup_pow_of_two(ASYNC_PF_PER_VCPU) - 1);
}
static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
u32 key = kvm_async_pf_hash_fn(gfn);
while (vcpu->arch.apf.gfns[key] != ~0)
key = kvm_async_pf_next_probe(key);
vcpu->arch.apf.gfns[key] = gfn;
}
static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
int i;
u32 key = kvm_async_pf_hash_fn(gfn);
for (i = 0; i < roundup_pow_of_two(ASYNC_PF_PER_VCPU) &&
(vcpu->arch.apf.gfns[key] != gfn &&
vcpu->arch.apf.gfns[key] != ~0); i++)
key = kvm_async_pf_next_probe(key);
return key;
}
bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
return vcpu->arch.apf.gfns[kvm_async_pf_gfn_slot(vcpu, gfn)] == gfn;
}
static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
u32 i, j, k;
i = j = kvm_async_pf_gfn_slot(vcpu, gfn);
while (true) {
vcpu->arch.apf.gfns[i] = ~0;
do {
j = kvm_async_pf_next_probe(j);
if (vcpu->arch.apf.gfns[j] == ~0)
return;
k = kvm_async_pf_hash_fn(vcpu->arch.apf.gfns[j]);
/*
* k lies cyclically in ]i,j]
* | i.k.j |
* |....j i.k.| or |.k..j i...|
*/
} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
vcpu->arch.apf.gfns[i] = vcpu->arch.apf.gfns[j];
i = j;
}
}
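/*
 * The function above is the classic backward-shift deletion for a
 * linear-probing hash table (essentially Knuth's Algorithm R): after
 * vacating a slot, later entries whose probe paths pass through it are
 * pulled back so lookups keep working. A self-contained sketch
 * (excluded from the build, all names hypothetical) with a tiny
 * 8-slot table:
 */
#if 0
#include <stdio.h>

#define SLOTS 8
#define EMPTY (~0UL)

static unsigned long table[SLOTS];

static unsigned hash(unsigned long v) { return v & (SLOTS - 1); }
static unsigned next(unsigned k) { return (k + 1) & (SLOTS - 1); }

static void add(unsigned long v)
{
	unsigned k = hash(v);

	while (table[k] != EMPTY)
		k = next(k);
	table[k] = v;
}

static unsigned slot(unsigned long v)
{
	unsigned k = hash(v);

	while (table[k] != v && table[k] != EMPTY)
		k = next(k);
	return k;
}

/* Same backward-shift delete as kvm_del_async_pf_gfn() above. */
static void del(unsigned long v)
{
	unsigned i, j, k;

	i = j = slot(v);
	while (1) {
		table[i] = EMPTY;
		do {
			j = next(j);
			if (table[j] == EMPTY)
				return;
			k = hash(table[j]);
		} while ((i <= j) ? (i < k && k <= j) : (i < k || k <= j));
		table[i] = table[j];
		i = j;
	}
}

int main(void)
{
	unsigned n;

	for (n = 0; n < SLOTS; n++)
		table[n] = EMPTY;
	add(1); add(9); add(17);	/* all three collide in slot 1 */
	del(9);
	printf("17 now in slot %u\n", slot(17));	/* 2: still found */
	return 0;
}
#endif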
static int apf_put_user(struct kvm_vcpu *vcpu, u32 val)
{
return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apf.data, &val,
sizeof(val));
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
trace_kvm_async_pf_not_present(work->arch.token, work->gva);
kvm_add_async_pf_gfn(vcpu, work->arch.gfn);
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) ||
(vcpu->arch.apf.send_user_only &&
kvm_x86_ops->get_cpl(vcpu) == 0))
kvm_make_request(KVM_REQ_APF_HALT, vcpu);
else if (!apf_put_user(vcpu, KVM_PV_REASON_PAGE_NOT_PRESENT)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
kvm_inject_page_fault(vcpu, &fault);
}
}
void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
struct kvm_async_pf *work)
{
struct x86_exception fault;
trace_kvm_async_pf_ready(work->arch.token, work->gva);
if (is_error_page(work->page))
work->arch.token = ~0; /* broadcast wakeup */
else
kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) &&
!apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) {
fault.vector = PF_VECTOR;
fault.error_code_valid = true;
fault.error_code = 0;
fault.nested_page_fault = false;
fault.address = work->arch.token;
kvm_inject_page_fault(vcpu, &fault);
}
vcpu->arch.apf.halted = false;
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
return !kvm_event_needs_reinjection(vcpu) &&
kvm_x86_ops->interrupt_allowed(vcpu);
}
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmrun);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5594_1 |
crossvul-cpp_data_good_3573_0 | /*
* An async IO implementation for Linux
* Written by Benjamin LaHaise <bcrl@kvack.org>
*
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
*
* See ../COPYING for licensing terms.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#define DEBUG 0
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <asm/kmap_types.h>
#include <asm/uaccess.h>
#if DEBUG > 1
#define dprintk printk
#else
#define dprintk(x...) do { ; } while (0)
#endif
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr; /* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/
static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;
static struct workqueue_struct *aio_wq;
/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);
static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);
static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);
/* aio_setup
* Creates the slab caches used by the aio routines, panic on
* failure as this is done early during the boot sequence.
*/
static int __init aio_setup(void)
{
kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
BUG_ON(!aio_wq);
pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));
return 0;
}
__initcall(aio_setup);
static void aio_free_ring(struct kioctx *ctx)
{
struct aio_ring_info *info = &ctx->ring_info;
long i;
for (i=0; i<info->nr_pages; i++)
put_page(info->ring_pages[i]);
if (info->mmap_size) {
down_write(&ctx->mm->mmap_sem);
do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
up_write(&ctx->mm->mmap_sem);
}
if (info->ring_pages && info->ring_pages != info->internal_pages)
kfree(info->ring_pages);
info->ring_pages = NULL;
info->nr = 0;
}
static int aio_setup_ring(struct kioctx *ctx)
{
struct aio_ring *ring;
struct aio_ring_info *info = &ctx->ring_info;
unsigned nr_events = ctx->max_reqs;
unsigned long size;
int nr_pages;
/* Compensate for the ring buffer's head/tail overlap entry */
nr_events += 2; /* 1 is required, 2 for good luck */
size = sizeof(struct aio_ring);
size += sizeof(struct io_event) * nr_events;
nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
if (nr_pages < 0)
return -EINVAL;
nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
info->nr = 0;
info->ring_pages = info->internal_pages;
if (nr_pages > AIO_RING_PAGES) {
info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!info->ring_pages)
return -ENOMEM;
}
info->mmap_size = nr_pages * PAGE_SIZE;
dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
down_write(&ctx->mm->mmap_sem);
info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
0);
if (IS_ERR((void *)info->mmap_base)) {
up_write(&ctx->mm->mmap_sem);
info->mmap_size = 0;
aio_free_ring(ctx);
return -EAGAIN;
}
dprintk("mmap address: 0x%08lx\n", info->mmap_base);
info->nr_pages = get_user_pages(current, ctx->mm,
info->mmap_base, nr_pages,
1, 0, info->ring_pages, NULL);
up_write(&ctx->mm->mmap_sem);
if (unlikely(info->nr_pages != nr_pages)) {
aio_free_ring(ctx);
return -EAGAIN;
}
ctx->user_id = info->mmap_base;
info->nr = nr_events; /* trusted copy */
ring = kmap_atomic(info->ring_pages[0], KM_USER0);
ring->nr = nr_events; /* user copy */
ring->id = ctx->user_id;
ring->head = ring->tail = 0;
ring->magic = AIO_RING_MAGIC;
ring->compat_features = AIO_RING_COMPAT_FEATURES;
ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
ring->header_length = sizeof(struct aio_ring);
kunmap_atomic(ring, KM_USER0);
return 0;
}
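/*
 * Worked sizing example for the function above, assuming 4096-byte
 * pages and the common x86-64 layout where both struct aio_ring and
 * struct io_event are 32 bytes: a request for 128 events becomes 130
 * after the overlap compensation, so size = 32 + 130 * 32 = 4192
 * bytes, nr_pages = 2, and nr_events is then recomputed upward to
 * (2 * 4096 - 32) / 32 = 255 usable events.
 */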
/* aio_ring_event: returns a pointer to the event at the given index,
* mapped via kmap_atomic(). Release the pointer with put_aio_ring_event().
*/
#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
#define aio_ring_event(info, nr, km) ({ \
unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
struct io_event *__event; \
__event = kmap_atomic( \
(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
__event += pos % AIO_EVENTS_PER_PAGE; \
__event; \
})
#define put_aio_ring_event(event, km) do { \
struct io_event *__event = (event); \
(void)__event; \
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
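/*
 * Worked indexing example for the macros above, again assuming
 * 4096-byte pages and 32-byte events: AIO_EVENTS_PER_PAGE = 128,
 * AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127, so
 * AIO_EVENTS_OFFSET = 1. Event nr 0 maps to pos 1, i.e. page 0 at
 * byte 32, immediately after the ring header; event nr 127 maps to
 * pos 128, the first slot of page 1; event nr 200 maps to pos 201,
 * page 1, slot 73.
 */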
static void ctx_rcu_free(struct rcu_head *head)
{
struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
unsigned nr_events = ctx->max_reqs;
kmem_cache_free(kioctx_cachep, ctx);
if (nr_events) {
spin_lock(&aio_nr_lock);
BUG_ON(aio_nr - nr_events > aio_nr);
aio_nr -= nr_events;
spin_unlock(&aio_nr_lock);
}
}
/* __put_ioctx
* Called when the last user of an aio context has gone away,
* and the struct needs to be freed.
*/
static void __put_ioctx(struct kioctx *ctx)
{
BUG_ON(ctx->reqs_active);
cancel_delayed_work(&ctx->wq);
cancel_work_sync(&ctx->wq.work);
aio_free_ring(ctx);
mmdrop(ctx->mm);
ctx->mm = NULL;
pr_debug("__put_ioctx: freeing %p\n", ctx);
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
static inline void get_ioctx(struct kioctx *kioctx)
{
BUG_ON(atomic_read(&kioctx->users) <= 0);
atomic_inc(&kioctx->users);
}
static inline int try_get_ioctx(struct kioctx *kioctx)
{
return atomic_inc_not_zero(&kioctx->users);
}
static inline void put_ioctx(struct kioctx *kioctx)
{
BUG_ON(atomic_read(&kioctx->users) <= 0);
if (unlikely(atomic_dec_and_test(&kioctx->users)))
__put_ioctx(kioctx);
}
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
*/
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
struct mm_struct *mm;
struct kioctx *ctx;
int did_sync = 0;
/* Prevent overflows */
if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
(nr_events > (0x10000000U / sizeof(struct kiocb)))) {
pr_debug("ENOMEM: nr_events too high\n");
return ERR_PTR(-EINVAL);
}
if ((unsigned long)nr_events > aio_max_nr)
return ERR_PTR(-EAGAIN);
ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
ctx->max_reqs = nr_events;
mm = ctx->mm = current->mm;
atomic_inc(&mm->mm_count);
atomic_set(&ctx->users, 1);
spin_lock_init(&ctx->ctx_lock);
spin_lock_init(&ctx->ring_info.ring_lock);
init_waitqueue_head(&ctx->wait);
INIT_LIST_HEAD(&ctx->active_reqs);
INIT_LIST_HEAD(&ctx->run_list);
INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);
if (aio_setup_ring(ctx) < 0)
goto out_freectx;
/* limit the number of system wide aios */
do {
spin_lock_bh(&aio_nr_lock);
if (aio_nr + nr_events > aio_max_nr ||
aio_nr + nr_events < aio_nr)
ctx->max_reqs = 0;
else
aio_nr += ctx->max_reqs;
spin_unlock_bh(&aio_nr_lock);
if (ctx->max_reqs || did_sync)
break;
/* wait for rcu callbacks to have completed before giving up */
synchronize_rcu();
did_sync = 1;
ctx->max_reqs = nr_events;
} while (1);
if (ctx->max_reqs == 0)
goto out_cleanup;
/* now link into global list. */
spin_lock(&mm->ioctx_lock);
hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
spin_unlock(&mm->ioctx_lock);
dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
return ctx;
out_cleanup:
__put_ioctx(ctx);
return ERR_PTR(-EAGAIN);
out_freectx:
mmdrop(mm);
kmem_cache_free(kioctx_cachep, ctx);
ctx = ERR_PTR(-ENOMEM);
dprintk("aio: error allocating ioctx %p\n", ctx);
return ctx;
}
/* aio_cancel_all
* Cancels all outstanding aio requests on an aio context. Used
* when the processes owning a context have all exited to encourage
* the rapid destruction of the kioctx.
*/
static void aio_cancel_all(struct kioctx *ctx)
{
int (*cancel)(struct kiocb *, struct io_event *);
struct io_event res;
spin_lock_irq(&ctx->ctx_lock);
ctx->dead = 1;
while (!list_empty(&ctx->active_reqs)) {
struct list_head *pos = ctx->active_reqs.next;
struct kiocb *iocb = list_kiocb(pos);
list_del_init(&iocb->ki_list);
cancel = iocb->ki_cancel;
kiocbSetCancelled(iocb);
if (cancel) {
iocb->ki_users++;
spin_unlock_irq(&ctx->ctx_lock);
cancel(iocb, &res);
spin_lock_irq(&ctx->ctx_lock);
}
}
spin_unlock_irq(&ctx->ctx_lock);
}
static void wait_for_all_aios(struct kioctx *ctx)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
spin_lock_irq(&ctx->ctx_lock);
if (!ctx->reqs_active)
goto out;
add_wait_queue(&ctx->wait, &wait);
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
while (ctx->reqs_active) {
spin_unlock_irq(&ctx->ctx_lock);
io_schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
spin_lock_irq(&ctx->ctx_lock);
}
__set_task_state(tsk, TASK_RUNNING);
remove_wait_queue(&ctx->wait, &wait);
out:
spin_unlock_irq(&ctx->ctx_lock);
}
/* wait_on_sync_kiocb:
* Waits on the given sync kiocb to complete.
*/
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
while (iocb->ki_users) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!iocb->ki_users)
break;
io_schedule();
}
__set_current_state(TASK_RUNNING);
return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
/* exit_aio: called when the last user of mm goes away. At this point,
* there is no way for any new requests to be submitted or any of the
* io_* syscalls to be called on the context. However, there may be
* outstanding requests which hold references to the context; as they
* go away, they will call put_ioctx and release any pinned memory
* associated with the request (held via struct page * references).
*/
void exit_aio(struct mm_struct *mm)
{
struct kioctx *ctx;
while (!hlist_empty(&mm->ioctx_list)) {
ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
hlist_del_rcu(&ctx->list);
aio_cancel_all(ctx);
wait_for_all_aios(ctx);
/*
* Ensure we don't leave the ctx on the aio_wq
*/
cancel_work_sync(&ctx->wq.work);
if (1 != atomic_read(&ctx->users))
printk(KERN_DEBUG
"exit_aio:ioctx still alive: %d %d %d\n",
atomic_read(&ctx->users), ctx->dead,
ctx->reqs_active);
put_ioctx(ctx);
}
}
/* aio_get_req
* Allocate a slot for an aio request. Increments the users count
* of the kioctx so that the kioctx stays around until all requests are
* complete. Returns NULL if no requests are free.
*
* Returns with kiocb->users set to 2. The io submit code path holds
* an extra reference while submitting the i/o.
* This prevents races between the aio code path referencing the
* req (after submitting it) and aio_complete() freeing the req.
*/
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
struct kiocb *req = NULL;
req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
if (unlikely(!req))
return NULL;
req->ki_flags = 0;
req->ki_users = 2;
req->ki_key = 0;
req->ki_ctx = ctx;
req->ki_cancel = NULL;
req->ki_retry = NULL;
req->ki_dtor = NULL;
req->private = NULL;
req->ki_iovec = NULL;
INIT_LIST_HEAD(&req->ki_run_list);
req->ki_eventfd = NULL;
return req;
}
/*
* struct kiocb's are allocated in batches to reduce the number of
* times the ctx lock is acquired and released.
*/
#define KIOCB_BATCH_SIZE 32L
struct kiocb_batch {
struct list_head head;
long count; /* number of requests left to allocate */
};
static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
INIT_LIST_HEAD(&batch->head);
batch->count = total;
}
static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
struct kiocb *req, *n;
if (list_empty(&batch->head))
return;
spin_lock_irq(&ctx->ctx_lock);
list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
list_del(&req->ki_batch);
list_del(&req->ki_list);
kmem_cache_free(kiocb_cachep, req);
ctx->reqs_active--;
}
spin_unlock_irq(&ctx->ctx_lock);
}
/*
* Allocate a batch of kiocbs. This avoids taking and dropping the
* context lock a lot during setup.
*/
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
unsigned short allocated, to_alloc;
long avail;
bool called_fput = false;
struct kiocb *req, *n;
struct aio_ring *ring;
to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
for (allocated = 0; allocated < to_alloc; allocated++) {
req = __aio_get_req(ctx);
if (!req)
/* allocation failed, go with what we've got */
break;
list_add(&req->ki_batch, &batch->head);
}
if (allocated == 0)
goto out;
retry:
spin_lock_irq(&ctx->ctx_lock);
ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
avail = aio_ring_avail(&ctx->ring_info, ring) - ctx->reqs_active;
BUG_ON(avail < 0);
if (avail == 0 && !called_fput) {
/*
* Handle a potential starvation case. It is possible that
* we hold the last reference on a struct file, causing us
* to delay the final fput to non-irq context. In this case,
* ctx->reqs_active is artificially high. Calling the fput
* routine here may free up a slot in the event completion
* ring, allowing this allocation to succeed.
*/
kunmap_atomic(ring);
spin_unlock_irq(&ctx->ctx_lock);
aio_fput_routine(NULL);
called_fput = true;
goto retry;
}
if (avail < allocated) {
/* Trim back the number of requests. */
list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
list_del(&req->ki_batch);
kmem_cache_free(kiocb_cachep, req);
if (--allocated <= avail)
break;
}
}
batch->count -= allocated;
list_for_each_entry(req, &batch->head, ki_batch) {
list_add(&req->ki_list, &ctx->active_reqs);
ctx->reqs_active++;
}
kunmap_atomic(ring);
spin_unlock_irq(&ctx->ctx_lock);
out:
return allocated;
}
static inline struct kiocb *aio_get_req(struct kioctx *ctx,
struct kiocb_batch *batch)
{
struct kiocb *req;
if (list_empty(&batch->head))
if (kiocb_batch_refill(ctx, batch) == 0)
return NULL;
req = list_first_entry(&batch->head, struct kiocb, ki_batch);
list_del(&req->ki_batch);
return req;
}
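/*
 * A sketch (excluded from the build) of the intended call pattern for
 * the batching helpers above, as a submit path would use them: reserve
 * up front, draw requests one at a time, then return whatever was not
 * consumed. batch_usage_sketch() is a hypothetical illustration, not a
 * function in this file.
 */
#if 0
static void batch_usage_sketch(struct kioctx *ctx, long nr)
{
	struct kiocb_batch batch;
	struct kiocb *req;
	long i;

	kiocb_batch_init(&batch, nr);
	for (i = 0; i < nr; i++) {
		req = aio_get_req(ctx, &batch);
		if (!req)
			break;		/* ring full or out of memory */
		/* ... set up and submit req ... */
	}
	kiocb_batch_free(ctx, &batch);	/* release unused requests */
}
#endif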
static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
assert_spin_locked(&ctx->ctx_lock);
if (req->ki_eventfd != NULL)
eventfd_ctx_put(req->ki_eventfd);
if (req->ki_dtor)
req->ki_dtor(req);
if (req->ki_iovec != &req->ki_inline_vec)
kfree(req->ki_iovec);
kmem_cache_free(kiocb_cachep, req);
ctx->reqs_active--;
if (unlikely(!ctx->reqs_active && ctx->dead))
wake_up_all(&ctx->wait);
}
static void aio_fput_routine(struct work_struct *data)
{
spin_lock_irq(&fput_lock);
while (likely(!list_empty(&fput_head))) {
struct kiocb *req = list_kiocb(fput_head.next);
struct kioctx *ctx = req->ki_ctx;
list_del(&req->ki_list);
spin_unlock_irq(&fput_lock);
/* Complete the fput(s) */
if (req->ki_filp != NULL)
fput(req->ki_filp);
/* Link the iocb into the context's free list */
spin_lock_irq(&ctx->ctx_lock);
really_put_req(ctx, req);
spin_unlock_irq(&ctx->ctx_lock);
put_ioctx(ctx);
spin_lock_irq(&fput_lock);
}
spin_unlock_irq(&fput_lock);
}
/* __aio_put_req
* Returns true if this put was the last user of the request.
*/
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
req, atomic_long_read(&req->ki_filp->f_count));
assert_spin_locked(&ctx->ctx_lock);
req->ki_users--;
BUG_ON(req->ki_users < 0);
if (likely(req->ki_users))
return 0;
list_del(&req->ki_list); /* remove from active_reqs */
req->ki_cancel = NULL;
req->ki_retry = NULL;
/*
* Try to optimize the aio and eventfd file* puts by avoiding
* scheduling work when it is not final fput() time. In the normal
* case we are not holding the last reference to the file*, so
* this function will be executed without any aio kthread wakeup.
*/
if (unlikely(!fput_atomic(req->ki_filp))) {
get_ioctx(ctx);
spin_lock(&fput_lock);
list_add(&req->ki_list, &fput_head);
spin_unlock(&fput_lock);
schedule_work(&fput_work);
} else {
req->ki_filp = NULL;
really_put_req(ctx, req);
}
return 1;
}
/* aio_put_req
* Returns true if this put was the last user of the kiocb,
* false if the request is still in use.
*/
int aio_put_req(struct kiocb *req)
{
struct kioctx *ctx = req->ki_ctx;
int ret;
spin_lock_irq(&ctx->ctx_lock);
ret = __aio_put_req(ctx, req);
spin_unlock_irq(&ctx->ctx_lock);
return ret;
}
EXPORT_SYMBOL(aio_put_req);
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
struct mm_struct *mm = current->mm;
struct kioctx *ctx, *ret = NULL;
struct hlist_node *n;
rcu_read_lock();
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
/*
* RCU protects us against accessing freed memory but
* we have to be careful not to get a reference when the
* reference count already dropped to 0 (ctx->dead test
* is unreliable because of races).
*/
if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
ret = ctx;
break;
}
}
rcu_read_unlock();
return ret;
}
/*
* Queue up a kiocb to be retried. Assumes that the kiocb
* has already been marked as kicked, and places it on
* the retry run list for the corresponding ioctx, if it
* isn't already queued. Returns 1 if it actually queued
* the kiocb (to tell the caller to activate the work
* queue to process it), or 0, if it found that it was
* already queued.
*/
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
assert_spin_locked(&ctx->ctx_lock);
if (list_empty(&iocb->ki_run_list)) {
list_add_tail(&iocb->ki_run_list,
&ctx->run_list);
return 1;
}
return 0;
}
/* aio_run_iocb
* This is the core aio execution routine. It is
* invoked both for initial i/o submission and
* subsequent retries via the aio_kick_handler.
* Expects to be invoked with iocb->ki_ctx->lock
* already held. The lock is released and reacquired
* as needed during processing.
*
* Calls the iocb retry method (already setup for the
* iocb on initial submission) for operation specific
* handling, but takes care of most of common retry
* execution details for a given iocb. The retry method
* needs to be non-blocking as far as possible, to avoid
* holding up other iocbs waiting to be serviced by the
* retry kernel thread.
*
* The trickier parts in this code have to do with
* ensuring that only one retry instance is in progress
* for a given iocb at any time. Providing that guarantee
* simplifies the coding of individual aio operations as
* it avoids various potential races.
*/
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
ssize_t (*retry)(struct kiocb *);
ssize_t ret;
if (!(retry = iocb->ki_retry)) {
printk("aio_run_iocb: iocb->ki_retry = NULL\n");
return 0;
}
/*
* We don't want the next retry iteration for this
* operation to start until this one has returned and
* updated the iocb state. However, wait_queue functions
* can trigger a kick_iocb from interrupt context in the
* meantime, indicating that data is available for the next
* iteration. We want to remember that and enable the
* next retry iteration _after_ we are through with
* this one.
*
* So, in order to be able to register a "kick", but
* prevent it from being queued now, we clear the kick
* flag, but make the kick code *think* that the iocb is
* still on the run list until we are actually done.
* When we are done with this iteration, we check if
* the iocb was kicked in the meantime and if so, queue
* it up afresh.
*/
kiocbClearKicked(iocb);
/*
* This is so that aio_complete knows it doesn't need to
* pull the iocb off the run list (We can't just call
* INIT_LIST_HEAD because we don't want a kick_iocb to
* queue this on the run list yet)
*/
iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
spin_unlock_irq(&ctx->ctx_lock);
/* Quit retrying if the i/o has been cancelled */
if (kiocbIsCancelled(iocb)) {
ret = -EINTR;
aio_complete(iocb, ret, 0);
/* must not access the iocb after this */
goto out;
}
/*
* Now we are all set to call the retry method in async
* context.
*/
ret = retry(iocb);
if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
/*
* There's no easy way to restart the syscall since other AIOs
* may already be running. Just fail this IO with EINTR.
*/
if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
ret = -EINTR;
aio_complete(iocb, ret, 0);
}
out:
spin_lock_irq(&ctx->ctx_lock);
if (-EIOCBRETRY == ret) {
/*
* OK, now that we are done with this iteration
* and know that there is more left to go,
* this is where we let go so that a subsequent
* "kick" can start the next iteration
*/
/* will make __queue_kicked_iocb succeed from here on */
INIT_LIST_HEAD(&iocb->ki_run_list);
/* we must queue the next iteration ourselves, if it
* has already been kicked */
if (kiocbIsKicked(iocb)) {
__queue_kicked_iocb(iocb);
/*
* __queue_kicked_iocb will always return 1 here, because
* iocb->ki_run_list is empty at this point so it should
* be safe to unconditionally queue the context into the
* work queue.
*/
aio_queue_work(ctx);
}
}
return ret;
}
/*
* __aio_run_iocbs:
* Process all pending retries queued on the ioctx
* run list.
* Assumes it is operating within the aio issuer's mm
* context.
*/
static int __aio_run_iocbs(struct kioctx *ctx)
{
struct kiocb *iocb;
struct list_head run_list;
assert_spin_locked(&ctx->ctx_lock);
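/*
 * Splice the pending retries onto a private list; aio_run_iocb()
 * drops ctx_lock while retrying, so new kicks may repopulate
 * ctx->run_list in the meantime (caught by the check below).
 */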
list_replace_init(&ctx->run_list, &run_list);
while (!list_empty(&run_list)) {
iocb = list_entry(run_list.next, struct kiocb,
ki_run_list);
list_del(&iocb->ki_run_list);
/*
* Hold an extra reference while retrying i/o.
*/
iocb->ki_users++; /* grab extra reference */
aio_run_iocb(iocb);
__aio_put_req(ctx, iocb);
}
if (!list_empty(&ctx->run_list))
return 1;
return 0;
}
static void aio_queue_work(struct kioctx * ctx)
{
unsigned long timeout;
/*
* if someone is waiting, get the work started right
* away, otherwise, use a longer delay
*/
smp_mb();
if (waitqueue_active(&ctx->wait))
timeout = 1;
else
timeout = HZ/10;
queue_delayed_work(aio_wq, &ctx->wq, timeout);
}
/*
* aio_run_all_iocbs:
* Process all pending retries queued on the ioctx
* run list, and keep running them until the list
* stays empty.
* Assumes it is operating within the aio issuer's mm context.
*/
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
spin_lock_irq(&ctx->ctx_lock);
while (__aio_run_iocbs(ctx))
;
spin_unlock_irq(&ctx->ctx_lock);
}
/*
* aio_kick_handler:
* Work queue handler triggered to process pending
* retries on an ioctx. Takes on the aio issuer's
* mm context before running the iocbs, so that
* copy_xxx_user operates on the issuer's address
* space.
* Run on aiod's context.
*/
static void aio_kick_handler(struct work_struct *work)
{
struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
mm_segment_t oldfs = get_fs();
struct mm_struct *mm;
int requeue;
set_fs(USER_DS);
use_mm(ctx->mm);
spin_lock_irq(&ctx->ctx_lock);
requeue = __aio_run_iocbs(ctx);
mm = ctx->mm;
spin_unlock_irq(&ctx->ctx_lock);
unuse_mm(mm);
set_fs(oldfs);
/*
* we're in a worker thread already; don't use queue_delayed_work.
*/
if (requeue)
queue_delayed_work(aio_wq, &ctx->wq, 0);
}
/*
* Called by kick_iocb to queue the kiocb for retry
* and if required activate the aio work queue to process
* it
*/
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
struct kioctx *ctx = iocb->ki_ctx;
unsigned long flags;
int run = 0;
spin_lock_irqsave(&ctx->ctx_lock, flags);
/* set this inside the lock so that we can't race with aio_run_iocb()
* testing it and putting the iocb on the run list under the lock */
if (!kiocbTryKick(iocb))
run = __queue_kicked_iocb(iocb);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
if (run)
aio_queue_work(ctx);
}
/*
* kick_iocb:
* Called typically from a wait queue callback context
* to trigger a retry of the iocb.
* The retry is usually executed by aio workqueue
* threads (See aio_kick_handler).
*/
void kick_iocb(struct kiocb *iocb)
{
/* sync iocbs are easy: they can only ever be executing from a
* single context. */
if (is_sync_kiocb(iocb)) {
kiocbSetKicked(iocb);
wake_up_process(iocb->ki_obj.tsk);
return;
}
try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);
/* aio_complete
* Called when the io request on the given iocb is complete.
* Returns true if this is the last user of the request. The
* only other user of the request can be the cancellation code.
*/
int aio_complete(struct kiocb *iocb, long res, long res2)
{
struct kioctx *ctx = iocb->ki_ctx;
struct aio_ring_info *info;
struct aio_ring *ring;
struct io_event *event;
unsigned long flags;
unsigned long tail;
int ret;
/*
* Special case handling for sync iocbs:
* - events go directly into the iocb for fast handling
* - the sync task with the iocb in its stack holds the single iocb
* ref, no other paths have a way to get another ref
* - the sync task helpfully left a reference to itself in the iocb
*/
if (is_sync_kiocb(iocb)) {
BUG_ON(iocb->ki_users != 1);
iocb->ki_user_data = res;
iocb->ki_users = 0;
wake_up_process(iocb->ki_obj.tsk);
return 1;
}
info = &ctx->ring_info;
/* add a completion event to the ring buffer.
* must be done holding ctx->ctx_lock to prevent
* other code from messing with the tail
* pointer since we might be called from irq
* context.
*/
spin_lock_irqsave(&ctx->ctx_lock, flags);
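/*
 * aio_run_iocb() sets ki_run_list.prev to NULL while a retry is in
 * flight, so check it before list_empty() to avoid touching an
 * intentionally poisoned list head.
 */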
if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
list_del_init(&iocb->ki_run_list);
/*
* cancelled requests don't get events, userland was given one
* when the event got cancelled.
*/
if (kiocbIsCancelled(iocb))
goto put_rq;
ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
tail = info->tail;
event = aio_ring_event(info, tail, KM_IRQ0);
if (++tail >= info->nr)
tail = 0;
event->obj = (u64)(unsigned long)iocb->ki_obj.user;
event->data = iocb->ki_user_data;
event->res = res;
event->res2 = res2;
dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
res, res2);
/* after flagging the request as done, we
* must never even look at it again
*/
smp_wmb(); /* make event visible before updating tail */
info->tail = tail;
ring->tail = tail;
put_aio_ring_event(event, KM_IRQ0);
kunmap_atomic(ring, KM_IRQ1);
pr_debug("added to ring %p at [%lu]\n", iocb, tail);
/*
* Check if the user asked us to deliver the result through an
* eventfd. The eventfd_signal() function is safe to be called
* from IRQ context.
*/
if (iocb->ki_eventfd != NULL)
eventfd_signal(iocb->ki_eventfd, 1);
put_rq:
/* everything turned out well, dispose of the aiocb. */
ret = __aio_put_req(ctx, iocb);
/*
* We have to order our ring_info tail store above and test
* of the wait list below outside the wait lock. This is
* like in wake_up_bit() where clearing a bit has to be
* ordered with the unlocked test.
*/
smp_mb();
if (waitqueue_active(&ctx->wait))
wake_up(&ctx->wait);
spin_unlock_irqrestore(&ctx->ctx_lock, flags);
return ret;
}
EXPORT_SYMBOL(aio_complete);
/* aio_read_evt
* Pull an event off of the ioctx's event ring. Returns the number of
* events fetched (0 or 1 ;-)
* FIXME: make this use cmpxchg.
* TODO: make the ringbuffer user mmap()able (requires FIXME).
*/
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
struct aio_ring_info *info = &ioctx->ring_info;
struct aio_ring *ring;
unsigned long head;
int ret = 0;
ring = kmap_atomic(info->ring_pages[0], KM_USER0);
dprintk("in aio_read_evt h%lu t%lu m%lu\n",
(unsigned long)ring->head, (unsigned long)ring->tail,
(unsigned long)ring->nr);
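/* cheap unlocked emptiness check; head is re-read under ring_lock below */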
if (ring->head == ring->tail)
goto out;
spin_lock(&info->ring_lock);
head = ring->head % info->nr;
if (head != ring->tail) {
struct io_event *evp = aio_ring_event(info, head, KM_USER1);
*ent = *evp;
head = (head + 1) % info->nr;
smp_mb(); /* finish reading the event before updating the head */
ring->head = head;
ret = 1;
put_aio_ring_event(evp, KM_USER1);
}
spin_unlock(&info->ring_lock);
out:
kunmap_atomic(ring, KM_USER0);
dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
(unsigned long)ring->head, (unsigned long)ring->tail);
return ret;
}
struct aio_timeout {
struct timer_list timer;
int timed_out;
struct task_struct *p;
};
static void timeout_func(unsigned long data)
{
struct aio_timeout *to = (struct aio_timeout *)data;
to->timed_out = 1;
wake_up_process(to->p);
}
static inline void init_timeout(struct aio_timeout *to)
{
setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
to->timed_out = 0;
to->p = current;
}
static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
const struct timespec *ts)
{
to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
if (time_after(to->timer.expires, jiffies))
add_timer(&to->timer);
else
to->timed_out = 1;
}
static inline void clear_timeout(struct aio_timeout *to)
{
del_singleshot_timer_sync(&to->timer);
}
static int read_events(struct kioctx *ctx,
long min_nr, long nr,
struct io_event __user *event,
struct timespec __user *timeout)
{
long start_jiffies = jiffies;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
int ret;
int i = 0;
struct io_event ent;
struct aio_timeout to;
int retry = 0;
/* needed to zero any padding within an entry (there shouldn't be
 * any, but C is fun!)
 */
memset(&ent, 0, sizeof(ent));
retry:
ret = 0;
while (likely(i < nr)) {
ret = aio_read_evt(ctx, &ent);
if (unlikely(ret <= 0))
break;
dprintk("read event: %Lx %Lx %Lx %Lx\n",
ent.data, ent.obj, ent.res, ent.res2);
/* Could we split the check in two? */
ret = -EFAULT;
if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
dprintk("aio: lost an event due to EFAULT.\n");
break;
}
ret = 0;
/* Good, event copied to userland, update counts. */
event++;
i++;
}
if (min_nr <= i)
return i;
if (ret)
return ret;
/* End fast path */
/* racey check, but it gets redone */
if (!retry && unlikely(!list_empty(&ctx->run_list))) {
retry = 1;
aio_run_all_iocbs(ctx);
goto retry;
}
init_timeout(&to);
if (timeout) {
struct timespec ts;
ret = -EFAULT;
if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
goto out;
set_timeout(start_jiffies, &to, &ts);
}
while (likely(i < nr)) {
add_wait_queue_exclusive(&ctx->wait, &wait);
do {
set_task_state(tsk, TASK_INTERRUPTIBLE);
ret = aio_read_evt(ctx, &ent);
if (ret)
break;
if (min_nr <= i)
break;
if (unlikely(ctx->dead)) {
ret = -EINVAL;
break;
}
if (to.timed_out) /* Only check after read evt */
break;
/* Try to only show up in io wait if there are ops
* in flight */
if (ctx->reqs_active)
io_schedule();
else
schedule();
if (signal_pending(tsk)) {
ret = -EINTR;
break;
}
/*ret = aio_read_evt(ctx, &ent);*/
} while (1);
set_task_state(tsk, TASK_RUNNING);
remove_wait_queue(&ctx->wait, &wait);
if (unlikely(ret <= 0))
break;
ret = -EFAULT;
if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
dprintk("aio: lost an event due to EFAULT.\n");
break;
}
/* Good, event copied to userland, update counts. */
event++;
i++;
}
if (timeout)
clear_timeout(&to);
out:
destroy_timer_on_stack(&to.timer);
return i ? i : ret;
}
/* Take an ioctx and remove it from the list of ioctx's. Protects
* against races with itself via ->dead.
*/
static void io_destroy(struct kioctx *ioctx)
{
struct mm_struct *mm = current->mm;
int was_dead;
/* delete the entry from the list if someone else hasn't already */
spin_lock(&mm->ioctx_lock);
was_dead = ioctx->dead;
ioctx->dead = 1;
hlist_del_rcu(&ioctx->list);
spin_unlock(&mm->ioctx_lock);
dprintk("aio_release(%p)\n", ioctx);
if (likely(!was_dead))
put_ioctx(ioctx); /* twice for the list */
aio_cancel_all(ioctx);
wait_for_all_aios(ioctx);
/*
* Wake up any waiters. The setting of ctx->dead must be seen
* by other CPUs at this point. Right now, we rely on the
* locking done by the above calls to ensure this consistency.
*/
wake_up_all(&ioctx->wait);
put_ioctx(ioctx); /* once for the lookup */
}
/* sys_io_setup:
* Create an aio_context capable of receiving at least nr_events.
* ctxp must not point to an aio_context that already exists, and
* must be initialized to 0 prior to the call. On successful
* creation of the aio_context, *ctxp is filled in with the resulting
* handle. May fail with -EINVAL if *ctxp is not initialized,
* if the specified nr_events exceeds internal limits. May fail
* with -EAGAIN if the specified nr_events exceeds the user's limit
* of available events. May fail with -ENOMEM if insufficient kernel
* resources are available. May fail with -EFAULT if an invalid
* pointer is passed for ctxp. Will fail with -ENOSYS if not
* implemented.
*/
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
struct kioctx *ioctx = NULL;
unsigned long ctx;
long ret;
ret = get_user(ctx, ctxp);
if (unlikely(ret))
goto out;
ret = -EINVAL;
if (unlikely(ctx || nr_events == 0)) {
pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
ctx, nr_events);
goto out;
}
ioctx = ioctx_alloc(nr_events);
ret = PTR_ERR(ioctx);
if (!IS_ERR(ioctx)) {
ret = put_user(ioctx->user_id, ctxp);
if (!ret)
return 0;
get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
io_destroy(ioctx);
}
out:
return ret;
}
/* sys_io_destroy:
* Destroy the aio_context specified. May cancel any outstanding
* AIOs and block on completion. Will fail with -ENOSYS if not
* implemented. May fail with -EINVAL if the context pointed to
* is invalid.
*/
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
struct kioctx *ioctx = lookup_ioctx(ctx);
if (likely(NULL != ioctx)) {
io_destroy(ioctx);
return 0;
}
pr_debug("EINVAL: io_destroy: invalid context id\n");
return -EINVAL;
}
static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];
BUG_ON(ret <= 0);
while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
ssize_t this = min((ssize_t)iov->iov_len, ret);
iov->iov_base += this;
iov->iov_len -= this;
iocb->ki_left -= this;
ret -= this;
if (iov->iov_len == 0) {
iocb->ki_cur_seg++;
iov++;
}
}
/* the caller should not have done more io than what fit in
* the remaining iovecs */
BUG_ON(ret > 0 && iocb->ki_left == 0);
}
static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
unsigned long, loff_t);
ssize_t ret = 0;
unsigned short opcode;
if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
(iocb->ki_opcode == IOCB_CMD_PREAD)) {
rw_op = file->f_op->aio_read;
opcode = IOCB_CMD_PREADV;
} else {
rw_op = file->f_op->aio_write;
opcode = IOCB_CMD_PWRITEV;
}
/* This matches the pread()/pwrite() logic */
if (iocb->ki_pos < 0)
return -EINVAL;
do {
ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
iocb->ki_nr_segs - iocb->ki_cur_seg,
iocb->ki_pos);
if (ret > 0)
aio_advance_iovec(iocb, ret);
/* retry all partial writes. retry partial reads as long as it's a
* regular file. */
} while (ret > 0 && iocb->ki_left > 0 &&
(opcode == IOCB_CMD_PWRITEV ||
(!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
/* This means we must have transferred all that we could */
/* No need to retry anymore */
if ((ret == 0) || (iocb->ki_left == 0))
ret = iocb->ki_nbytes - iocb->ki_left;
/* If we managed to write some out we return that, rather than
* the eventual error. */
if (opcode == IOCB_CMD_PWRITEV
&& ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
&& iocb->ki_nbytes - iocb->ki_left)
ret = iocb->ki_nbytes - iocb->ki_left;
return ret;
}
static ssize_t aio_fdsync(struct kiocb *iocb)
{
struct file *file = iocb->ki_filp;
ssize_t ret = -EINVAL;
if (file->f_op->aio_fsync)
ret = file->f_op->aio_fsync(iocb, 1);
return ret;
}
static ssize_t aio_fsync(struct kiocb *iocb)
{
struct file *file = iocb->ki_filp;
ssize_t ret = -EINVAL;
if (file->f_op->aio_fsync)
ret = file->f_op->aio_fsync(iocb, 0);
return ret;
}
static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
ssize_t ret;
#ifdef CONFIG_COMPAT
if (compat)
ret = compat_rw_copy_check_uvector(type,
(struct compat_iovec __user *)kiocb->ki_buf,
kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
&kiocb->ki_iovec, 1);
else
#endif
ret = rw_copy_check_uvector(type,
(struct iovec __user *)kiocb->ki_buf,
kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
&kiocb->ki_iovec, 1);
if (ret < 0)
goto out;
kiocb->ki_nr_segs = kiocb->ki_nbytes;
kiocb->ki_cur_seg = 0;
/* ki_nbytes/left now reflect bytes instead of segs */
kiocb->ki_nbytes = ret;
kiocb->ki_left = ret;
ret = 0;
out:
return ret;
}
static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
kiocb->ki_iovec = &kiocb->ki_inline_vec;
kiocb->ki_iovec->iov_base = kiocb->ki_buf;
kiocb->ki_iovec->iov_len = kiocb->ki_left;
kiocb->ki_nr_segs = 1;
kiocb->ki_cur_seg = 0;
return 0;
}
/*
* aio_setup_iocb:
* Performs the initial checks and aio retry method
* setup for the kiocb at the time of io submission.
*/
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
struct file *file = kiocb->ki_filp;
ssize_t ret = 0;
switch (kiocb->ki_opcode) {
case IOCB_CMD_PREAD:
ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_READ)))
break;
ret = -EFAULT;
if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
kiocb->ki_left)))
break;
ret = security_file_permission(file, MAY_READ);
if (unlikely(ret))
break;
ret = aio_setup_single_vector(kiocb);
if (ret)
break;
ret = -EINVAL;
if (file->f_op->aio_read)
kiocb->ki_retry = aio_rw_vect_retry;
break;
case IOCB_CMD_PWRITE:
ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
break;
ret = -EFAULT;
if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
kiocb->ki_left)))
break;
ret = security_file_permission(file, MAY_WRITE);
if (unlikely(ret))
break;
ret = aio_setup_single_vector(kiocb);
if (ret)
break;
ret = -EINVAL;
if (file->f_op->aio_write)
kiocb->ki_retry = aio_rw_vect_retry;
break;
case IOCB_CMD_PREADV:
ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_READ)))
break;
ret = security_file_permission(file, MAY_READ);
if (unlikely(ret))
break;
ret = aio_setup_vectored_rw(READ, kiocb, compat);
if (ret)
break;
ret = -EINVAL;
if (file->f_op->aio_read)
kiocb->ki_retry = aio_rw_vect_retry;
break;
case IOCB_CMD_PWRITEV:
ret = -EBADF;
if (unlikely(!(file->f_mode & FMODE_WRITE)))
break;
ret = security_file_permission(file, MAY_WRITE);
if (unlikely(ret))
break;
ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
if (ret)
break;
ret = -EINVAL;
if (file->f_op->aio_write)
kiocb->ki_retry = aio_rw_vect_retry;
break;
case IOCB_CMD_FDSYNC:
ret = -EINVAL;
if (file->f_op->aio_fsync)
kiocb->ki_retry = aio_fdsync;
break;
case IOCB_CMD_FSYNC:
ret = -EINVAL;
if (file->f_op->aio_fsync)
kiocb->ki_retry = aio_fsync;
break;
default:
dprintk("EINVAL: io_submit: no operation provided\n");
ret = -EINVAL;
}
if (!kiocb->ki_retry)
return ret;
return 0;
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
struct iocb *iocb, struct kiocb_batch *batch,
bool compat)
{
struct kiocb *req;
struct file *file;
ssize_t ret;
/* enforce forwards compatibility on users */
if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
pr_debug("EINVAL: io_submit: reserve field set\n");
return -EINVAL;
}
/* prevent overflows */
if (unlikely(
(iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
(iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
((ssize_t)iocb->aio_nbytes < 0)
)) {
pr_debug("EINVAL: io_submit: overflow check\n");
return -EINVAL;
}
file = fget(iocb->aio_fildes);
if (unlikely(!file))
return -EBADF;
req = aio_get_req(ctx, batch); /* returns with 2 references to req */
if (unlikely(!req)) {
fput(file);
return -EAGAIN;
}
req->ki_filp = file;
if (iocb->aio_flags & IOCB_FLAG_RESFD) {
/*
* If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
* instance of the file* now. The file descriptor must be
* an eventfd() fd, and will be signaled for each completed
* event using the eventfd_signal() function.
*/
req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
if (IS_ERR(req->ki_eventfd)) {
ret = PTR_ERR(req->ki_eventfd);
req->ki_eventfd = NULL;
goto out_put_req;
}
}
ret = put_user(req->ki_key, &user_iocb->aio_key);
if (unlikely(ret)) {
dprintk("EFAULT: aio_key\n");
goto out_put_req;
}
req->ki_obj.user = user_iocb;
req->ki_user_data = iocb->aio_data;
req->ki_pos = iocb->aio_offset;
req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
req->ki_opcode = iocb->aio_lio_opcode;
ret = aio_setup_iocb(req, compat);
if (ret)
goto out_put_req;
spin_lock_irq(&ctx->ctx_lock);
/*
* We could have raced with io_destroy() and are currently holding a
* reference to ctx which should be destroyed. We cannot submit IO
* since ctx gets freed as soon as io_submit() puts its reference. The
* check here is reliable: io_destroy() sets ctx->dead before waiting
* for outstanding IO and the barrier between these two is realized by
* unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
* increment ctx->reqs_active before checking for ctx->dead and the
* barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
* don't see ctx->dead set here, io_destroy() waits for our IO to
* finish.
*/
if (ctx->dead) {
spin_unlock_irq(&ctx->ctx_lock);
ret = -EINVAL;
goto out_put_req;
}
aio_run_iocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */
while (__aio_run_iocbs(ctx))
;
}
spin_unlock_irq(&ctx->ctx_lock);
aio_put_req(req); /* drop extra ref to req */
return 0;
out_put_req:
aio_put_req(req); /* drop extra ref to req */
aio_put_req(req); /* drop i/o ref to req */
return ret;
}
long do_io_submit(aio_context_t ctx_id, long nr,
struct iocb __user *__user *iocbpp, bool compat)
{
struct kioctx *ctx;
long ret = 0;
int i = 0;
struct blk_plug plug;
struct kiocb_batch batch;
if (unlikely(nr < 0))
return -EINVAL;
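/* clamp nr so that nr*sizeof(*iocbpp) below cannot overflow */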
if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
nr = LONG_MAX/sizeof(*iocbpp);
if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
return -EFAULT;
ctx = lookup_ioctx(ctx_id);
if (unlikely(!ctx)) {
pr_debug("EINVAL: io_submit: invalid context id\n");
return -EINVAL;
}
kiocb_batch_init(&batch, nr);
blk_start_plug(&plug);
/*
* AKPM: should this return a partial result if some of the IOs were
* successfully submitted?
*/
for (i = 0; i < nr; i++) {
struct iocb __user *user_iocb;
struct iocb tmp;
if (unlikely(__get_user(user_iocb, iocbpp + i))) {
ret = -EFAULT;
break;
}
if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
ret = -EFAULT;
break;
}
ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
if (ret)
break;
}
blk_finish_plug(&plug);
kiocb_batch_free(ctx, &batch);
put_ioctx(ctx);
return i ? i : ret;
}
/* sys_io_submit:
* Queue the nr iocbs pointed to by iocbpp for processing. Returns
* the number of iocbs queued. May return -EINVAL if the aio_context
* specified by ctx_id is invalid, if nr is < 0, if the iocb at
* *iocbpp[0] is not properly initialized, if the operation specified
* is invalid for the file descriptor in the iocb. May fail with
* -EFAULT if any of the data structures point to invalid data. May
* fail with -EBADF if the file descriptor specified in the first
* iocb is invalid. May fail with -EAGAIN if insufficient resources
* are available to queue any iocbs. Will return 0 if nr is 0. Will
* fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
struct iocb __user * __user *, iocbpp)
{
return do_io_submit(ctx_id, nr, iocbpp, 0);
}
/* lookup_kiocb
* Finds a given iocb for cancellation.
*/
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
u32 key)
{
struct list_head *pos;
assert_spin_locked(&ctx->ctx_lock);
/* TODO: use a hash or array, this sucks. */
list_for_each(pos, &ctx->active_reqs) {
struct kiocb *kiocb = list_kiocb(pos);
if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
return kiocb;
}
return NULL;
}
/* sys_io_cancel:
* Attempts to cancel an iocb previously passed to io_submit. If
* the operation is successfully cancelled, the resulting event is
* copied into the memory pointed to by result without being placed
* into the completion queue and 0 is returned. May fail with
* -EFAULT if any of the data structures pointed to are invalid.
* May fail with -EINVAL if aio_context specified by ctx_id is
* invalid. May fail with -EAGAIN if the iocb specified was not
* cancelled. Will fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
struct io_event __user *, result)
{
int (*cancel)(struct kiocb *iocb, struct io_event *res);
struct kioctx *ctx;
struct kiocb *kiocb;
u32 key;
int ret;
ret = get_user(key, &iocb->aio_key);
if (unlikely(ret))
return -EFAULT;
ctx = lookup_ioctx(ctx_id);
if (unlikely(!ctx))
return -EINVAL;
spin_lock_irq(&ctx->ctx_lock);
ret = -EAGAIN;
kiocb = lookup_kiocb(ctx, iocb, key);
if (kiocb && kiocb->ki_cancel) {
cancel = kiocb->ki_cancel;
kiocb->ki_users++;
kiocbSetCancelled(kiocb);
} else
cancel = NULL;
spin_unlock_irq(&ctx->ctx_lock);
if (NULL != cancel) {
struct io_event tmp;
pr_debug("calling cancel\n");
memset(&tmp, 0, sizeof(tmp));
tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
tmp.data = kiocb->ki_user_data;
ret = cancel(kiocb, &tmp);
if (!ret) {
/* Cancellation succeeded -- copy the result
* into the user's buffer.
*/
if (copy_to_user(result, &tmp, sizeof(tmp)))
ret = -EFAULT;
}
} else
ret = -EINVAL;
put_ioctx(ctx);
return ret;
}
/* io_getevents:
* Attempts to read at least min_nr events and up to nr events from
* the completion queue for the aio_context specified by ctx_id. If
* it succeeds, the number of read events is returned. May fail with
* -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
* out of range, if timeout is out of range. May fail with -EFAULT
* if any of the memory specified is invalid. May return 0 or
* < min_nr if the timeout specified by timeout has elapsed
* before sufficient events are available, where timeout == NULL
* specifies an infinite timeout. Note that the timeout pointed to by
* timeout is relative and will be updated if not NULL and the
* operation blocks. Will fail with -ENOSYS if not implemented.
*/
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
long, min_nr,
long, nr,
struct io_event __user *, events,
struct timespec __user *, timeout)
{
struct kioctx *ioctx = lookup_ioctx(ctx_id);
long ret = -EINVAL;
if (likely(ioctx)) {
if (likely(min_nr <= nr && min_nr >= 0))
ret = read_events(ioctx, min_nr, nr, events, timeout);
put_ioctx(ioctx);
}
asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
return ret;
}
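/*
 * Illustrative (hypothetical) userspace sequence for the AIO syscalls
 * above; a minimal sketch using the raw syscall interface, where fd, buf
 * and len are assumed to be an open descriptor and a suitably sized
 * buffer:
 *
 *	aio_context_t ctx = 0;
 *	struct iocb cb = { 0 }, *cbs[1] = { &cb };
 *	struct io_event ev;
 *
 *	io_setup(8, &ctx);			// room for 8 completion events
 *	cb.aio_fildes = fd;
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_buf = (u64)(unsigned long)buf;
 *	cb.aio_nbytes = len;
 *	io_submit(ctx, 1, cbs);			// queue one read
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// block for its completion
 *	io_destroy(ctx);
 */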
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3573_0 |
crossvul-cpp_data_good_3486_19 | /*
* 'traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
* Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC 12
# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
return;
}
printk("%08x ", val);
}
}
printk("\n");
}
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
print_modules();
show_regs(regs);
printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
(unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
* - kernel/userspace interfaces cause a jump to an appropriate handler
* - other kernel errors are bad
*/
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
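/*
 * Sign-extend a 1- or 2-byte value that handle_unaligned_ins() has loaded
 * into the low-order bytes of a zero-filled 32-bit destination, honouring
 * the CPU endianness.
 */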
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
if ((count == 1) && dst[0] & 0x80) {
dst[1] = 0xff;
dst[2] = 0xff;
dst[3] = 0xff;
}
if ((count == 2) && dst[1] & 0x80) {
dst[2] = 0xff;
dst[3] = 0xff;
}
#else
if ((count == 1) && dst[3] & 0x80) {
dst[2] = 0xff;
dst[1] = 0xff;
dst[0] = 0xff;
}
if ((count == 2) && dst[2] & 0x80) {
dst[1] = 0xff;
dst[0] = 0xff;
}
#endif
}
static struct mem_access user_mem_access = {
copy_from_user,
copy_to_user,
};
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
* - note that PC _may not_ point to the faulting instruction
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
index = (instruction>>4)&15; /* 0x00F0 */
rm = &regs->regs[index];
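/* the low two opcode bits encode the operand size:
 * 0 -> 1 byte, 1 -> 2 bytes, 2 -> 4 bytes, 3 -> 8 bytes */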
count = 1<<(instruction&3);
switch (count) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
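/* on big endian the loaded bytes belong at the end of the zeroed longword */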
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
break;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
break;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
}
return ret;
fetch_fault:
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
die_if_no_fixup("Fault in unaligned fixup", regs, 0);
return -EFAULT;
}
/*
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
insn_size_t old_instruction,
struct mem_access *ma)
{
insn_size_t instruction;
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
/* kernel */
die("delay-slot-insn faulting in handle_unaligned_delayslot",
regs, 0);
}
return handle_unaligned_ins(instruction, regs, ma);
}
/*
* handle an instruction that does an unaligned memory access
* - have to be careful of branch delay-slot instructions that fault
* SH3:
* - if the branch would be taken PC points to the branch
* - if the branch would not be taken, PC points to delay-slot
* SH4:
* - PC always points to delayed branch
* - return 0 if handled, -EFAULT if failed (may not return if in kernel)
*/
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
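/* Worked example: a bf/bt displacement byte of 0xFE is (signed char)-2, so
 * SH_PC_8BIT_OFFSET yields (-2)*2 + 4 = 0, i.e. a branch back to the branch
 * instruction itself (PC points at the branch, as noted above) */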
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int expected,
unsigned long address)
{
u_int rm;
int ret, index;
/*
* XXX: We can't handle mixed 16/32-bit instructions yet
*/
if (instruction_size(instruction) != 2)
return -EINVAL;
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
/*
* Log the unexpected fixups, and then pass them on to perf.
*
* We intentionally don't report the expected cases to perf as
* otherwise the trapped I/O case will skew the results too much
* to be useful.
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
regs, address);
}
ret = -EFAULT;
switch (instruction&0xF000) {
case 0x0000:
if (instruction==0x000B) {
/* rts */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x1000: /* mov.l Rm,@(disp,Rn) */
goto simple;
case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
goto simple;
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x5000: /* mov.l @(disp,Rm),Rn */
goto simple;
case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
goto simple;
case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
switch (instruction&0x0F00) {
case 0x0100: /* mov.w R0,@(disp,Rm) */
goto simple;
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
case 0x0900: /* bt lab - no delayslot */
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
}
break;
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
}
return ret;
/* handle non-delay-slot instruction */
simple:
ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
/*
* Handle various address error exceptions:
* - instruction address error:
* misaligned PC
* PC >= 0x80000000 in user mode
* - data address error (read and write)
* misaligned data access
* access to >= 0x80000000 in user mode
* Unfortunately we can't distinguish between instruction address error
* and data address errors caused by read accesses.
*/
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
error_code = lookup_exception_vector();
#endif
oldfs = get_fs();
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
unsigned int user_action;
local_irq_enable();
inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
sizeof(instruction))) {
set_fs(oldfs);
goto uspace_segv;
}
set_fs(oldfs);
/* shout about userspace fixups */
unaligned_fixups_notify(current, instruction, regs);
user_action = unaligned_user_action();
if (user_action & UM_FIXUP)
goto fixup;
if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
regs->pc += instruction_size(instruction);
return;
}
fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
goto uspace_segv;
}
set_fs(USER_DS);
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access, 0,
address);
set_fs(oldfs);
if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
regs->pr);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
*/
set_fs(oldfs);
die("insn faulting in do_address_error", regs, 0);
}
unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs, &user_mem_access,
0, address);
set_fs(oldfs);
}
}
#ifdef CONFIG_SH_DSP
/*
* SH-DSP support gerg@snapgear.com.
*/
int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
/*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
inst &= 0xf000;
/* Check for any type of DSP or support instruction */
if ((inst == 0xf000) || (inst == 0x4000))
return 1;
return 0;
}
#else
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
siginfo_t info;
switch (r4) {
case TRAP_DIVZERO_ERROR:
info.si_code = FPE_INTDIV;
break;
case TRAP_DIVOVF_ERROR:
info.si_code = FPE_INTOVF;
break;
}
force_sig_info(SIGFPE, &info, current);
}
#endif
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;
int err;
get_user(inst, (unsigned short*)regs->pc);
err = do_fpu_inst(inst, regs);
if (!err) {
regs->pc += instruction_size(inst);
return;
}
/* not a FPU inst. */
#endif
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
/* Save DSP mode */
tsk->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
error_code = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
* bts: 8dxx: PC+=d*2+4;
* bra: axxx: PC+=D*2+4;
* bsr: bxxx: PC+=D*2+4 after PR=PC+4;
* braf:0x23: PC+=Rn*2+4;
* bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
* jmp: 4x2b: PC=Rn;
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
if (((inst & 0xf000) == 0xb000) || /* bsr */
((inst & 0xf0ff) == 0x0003) || /* bsrf */
((inst & 0xf0ff) == 0x400b)) /* jsr */
regs->pr = regs->pc + 4;
if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long inst;
struct task_struct *tsk = current;
if (kprobe_handle_illslot(regs->pc) == 0)
return;
#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
inst = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("illegal slot instruction", regs, inst);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
ex = lookup_exception_vector();
die_if_kernel("exception", regs, ex);
}
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
/* NOTE: The VBR value should be at P1
(or P2, virtual "fixed" address space).
It definitely should not be a physical address. */
asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
/* disable exception blocking now when the vbr has been setup */
clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
/*
* For SH-4 lacking an FPU, treat floating point instructions as
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (tsk == current)
sp = (unsigned long *)current_stack_pointer;
else
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
dump_mem("Stack: ", stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_19 |
crossvul-cpp_data_bad_947_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO GGGGG RRRR IIIII FFFFF Y Y %
% MM MM O O G R R I F Y Y %
% M M M O O G GGG RRRR I FFF Y %
% M M O O G G R R I F Y %
% M M OOO GGGG R R IIIII F Y %
% %
% %
% MagickWand Module Methods %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use the mogrify program to resize an image, blur, crop, despeckle, dither,
% draw on, flip, join, re-sample, and much more. This tool is similar to
% convert except that the original image file is overwritten (unless you
% change the file suffix with the -format option) with any changes you
% request.
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/mogrify-private.h"
#include "magick/blob-private.h"
#include "magick/color-private.h"
#include "magick/image-private.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/thread-private.h"
#include "magick/string-private.h"
#include "magick/timer-private.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_HAVE_UTIME_H)
#include <utime.h>
#endif
/*
Define declarations.
*/
#define UndefinedCompressionQuality 0UL
/*
Constant declaration.
*/
static const char
MogrifyBackgroundColor[] = "#fff", /* white */
MogrifyBorderColor[] = "#dfdfdf", /* sRGB gray */
MogrifyMatteColor[] = "#bdbdbd"; /* slightly darker gray */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o m m a n d G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickCommandGenesis() applies image processing options to an image as
% prescribed by command line options.
%
% The format of the MagickCommandGenesis method is:
%
% MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
% MagickCommand command,int argc,char **argv,char **metadata,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o command: Choose from ConvertImageCommand, IdentifyImageCommand,
% MogrifyImageCommand, CompositeImageCommand, CompareImageCommand,
% ConjureImageCommand, StreamImageCommand, ImportImageCommand,
% DisplayImageCommand, or AnimateImageCommand.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
MagickCommand command,int argc,char **argv,char **metadata,
ExceptionInfo *exception)
{
char
*option;
double
duration,
serial;
MagickBooleanType
concurrent,
regard_warnings,
status;
register ssize_t
i;
size_t
iterations,
number_threads;
ssize_t
n;
(void) setlocale(LC_ALL,"");
(void) setlocale(LC_NUMERIC,"C");
concurrent=MagickFalse;
duration=(-1.0);
iterations=1;
status=MagickTrue;
regard_warnings=MagickFalse;
for (i=1; i < (ssize_t) (argc-1); i++)
{
option=argv[i];
if ((strlen(option) == 1) || ((*option != '-') && (*option != '+')))
continue;
if (LocaleCompare("bench",option+1) == 0)
iterations=StringToUnsignedLong(argv[++i]);
if (LocaleCompare("concurrent",option+1) == 0)
concurrent=MagickTrue;
if (LocaleCompare("debug",option+1) == 0)
(void) SetLogEventMask(argv[++i]);
if (LocaleCompare("distribute-cache",option+1) == 0)
{
DistributePixelCacheServer(StringToInteger(argv[++i]),exception);
exit(0);
}
if (LocaleCompare("duration",option+1) == 0)
duration=StringToDouble(argv[++i],(char **) NULL);
if (LocaleCompare("regard-warnings",option+1) == 0)
regard_warnings=MagickTrue;
}
if (iterations == 1)
{
status=command(image_info,argc,argv,metadata,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
return(status);
}
number_threads=GetOpenMPMaximumThreads();
serial=0.0;
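/*
 * Benchmark mode (iterations > 1): rerun the command while sweeping the
 * thread limit from 1 to the OpenMP maximum; the single-threaded pass
 * provides the serial baseline used in the report below.
 */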
for (n=1; n <= (ssize_t) number_threads; n++)
{
double
e,
parallel,
user_time;
TimerInfo
*timer;
(void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n);
timer=AcquireTimerInfo();
if (concurrent == MagickFalse)
{
for (i=0; i < (ssize_t) iterations; i++)
{
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,metadata,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
}
}
else
{
SetOpenMPNested(1);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp parallel for shared(status)
#endif
for (i=0; i < (ssize_t) iterations; i++)
{
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,metadata,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_MagickCommandGenesis)
#endif
{
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
}
}
}
user_time=GetUserTime(timer);
parallel=GetElapsedTime(timer);
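/*
 * Heuristic scaling estimate: compare this n-thread run against the
 * single-thread (serial) baseline measured on the first pass; e is
 * printed alongside the raw iteration rate below.
 */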
e=1.0;
if (n == 1)
serial=parallel;
else
e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/
(double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n);
(void) FormatLocaleFile(stderr,
" Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n",
(double) n,(double) iterations,(double) iterations/parallel,e,user_time,
(unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel,
60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5));
timer=DestroyTimerInfo(timer);
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImage() applies simple single-image processing options to a single
% image that may be part of a large list, and also handles any 'region'
% image processing.
%
% The image in the list may be modified in three different ways...
%
% * directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
% * replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
% * replaced by a list of images (only the -separate option!)
%
% In each case the result is returned into the list, and a pointer to the
% modified image (last image added if replaced by a list of images) is
% returned.
%
% ASIDE: The -crop option is present but restricted to non-tile single image
% crops.
%
% This means that if all the images are being processed (such as by
% MogrifyImages()), the next image to be processed will be as per the
% pointer (*image)->next. Also the image list may grow as a result of some
% specific operations, but as images are never merged or deleted it will
% never shrink in length. Typically the list will remain the same length.
%
% WARNING: As the image pointed to may be replaced, the first image in the
% list may also change. GetFirstImageInList() should be used by the caller
% if they wish to obtain the Image pointer to the first image in the list.
%
%
% The format of the MogrifyImage method is:
%
% MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
% const char **argv,Image **image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
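% As a rough illustration, a minimal caller might look like the following
% sketch (hypothetical file name and option values; error handling elided):
%
%   ExceptionInfo *exception = AcquireExceptionInfo();
%   ImageInfo *image_info = AcquireImageInfo();
%   const char *options[] = { "-resize", "50%", "-negate" };
%   Image *image;
%
%   (void) CopyMagickString(image_info->filename,"input.png",MaxTextExtent);
%   image=ReadImage(image_info,exception);
%   if (image != (Image *) NULL)
%     {
%       (void) MogrifyImage(image_info,3,options,&image,exception);
%       image=GetFirstImageInList(image);  /* the pointer may have moved */
%     }
%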
*/
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MaxTextExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
/*
Read an image into an image cache if not already present. Return the image
that is in the cache under that filename.
*/
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->filename,path,MaxTextExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
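/*
IsPathWritable() returns MagickTrue if the path exists and is writable by
the current user.
*/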
static inline MagickBooleanType IsPathWritable(const char *path)
{
if (IsPathAccessible(path) == MagickFalse)
return(MagickFalse);
if (access_utf8(path,W_OK) != 0)
return(MagickFalse);
return(MagickTrue);
}
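/*
MonitorProgress() is the progress callback installed by the -monitor
option: it writes "tag: offset of extent, NN% complete" to stderr,
throttled to every 50th offset plus the final one.
*/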
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MaxTextExtent],
tag[MaxTextExtent];
const char
*locale_message;
register char
*p;
wand_unreferenced(client_data);
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MaxTextExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MaxTextExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
static Image *SparseColorOption(const Image *image,const ChannelType channel,
const SparseColorMethod method,const char *arguments,
const MagickBooleanType color_from_image,ExceptionInfo *exception)
{
ChannelType
channels;
char
token[MaxTextExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
MagickBooleanType
error;
MagickPixelPacket
color;
register size_t
x;
size_t
number_arguments,
number_colors;
/*
SparseColorOption() parses the complex -sparse-color argument into an
array of floating point values then calls SparseColorImage(). The argument
is a complex mix of floating-point pixel coordinates and color specifications
(or direct floating point numbers). The number of floats needed to
represent a color varies depending on the current channel setting.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Limit channels according to the image and add up the number of color
channels.
*/
channels=channel;
if (image->colorspace != CMYKColorspace)
channels=(ChannelType) (channels & ~IndexChannel); /* no index channel */
if (image->matte == MagickFalse)
channels=(ChannelType) (channels & ~OpacityChannel); /* no alpha channel */
number_colors=0;
if ((channels & RedChannel) != 0)
number_colors++;
if ((channels & GreenChannel) != 0)
number_colors++;
if ((channels & BlueChannel) != 0)
number_colors++;
if ((channels & IndexChannel) != 0)
number_colors++;
if ((channels & OpacityChannel) != 0)
number_colors++;
/*
Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
if ( color_from_image ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color arg given, when colors are coming from image");
return( (Image *) NULL);
}
x += number_colors; /* color argument */
}
else {
x++; /* floating point argument */
}
}
error=MagickTrue;
if ( color_from_image ) {
/* just the control points are being given */
error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse;
number_arguments=(x/2)*(2+number_colors);
}
else {
/* control points and color values */
error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse;
number_arguments=x;
}
if ( error ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
" MemoryAllocationFailed\n""%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=',';
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color found, instead of X-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
token[0]=',';
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color values for this control point */
#if 0
if ( color_from_image ) {
/* get color from image */
/* HOW??? */
}
else
#endif
{
/* color name or function given in string argument */
token[0]=',';
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryMagickColor(token,&color,exception);
if ( channels & RedChannel )
sparse_arguments[x++] = QuantumScale*color.red;
if ( channels & GreenChannel )
sparse_arguments[x++] = QuantumScale*color.green;
if ( channels & BlueChannel )
sparse_arguments[x++] = QuantumScale*color.blue;
if ( channels & IndexChannel )
sparse_arguments[x++] = QuantumScale*color.index;
if ( channels & OpacityChannel )
sparse_arguments[x++] = QuantumScale*color.opacity;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ( channels & RedChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & GreenChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & BlueChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & IndexChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & OpacityChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
}
if ( number_arguments != x && !error ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
" InvalidArgument","`%s': %s","sparse-color","Argument Parsing Error");
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( (Image *) NULL);
}
if ( error )
return( (Image *) NULL);
/* Call the Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,channels,method,number_arguments,
sparse_arguments,exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
const char **argv,Image **image,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*format,
*option;
DrawInfo
*draw_info;
GeometryInfo
geometry_info;
Image
*region_image;
ImageInfo
*mogrify_info;
MagickStatusType
status;
MagickPixelPacket
fill;
MagickStatusType
flags;
QuantizeInfo
*quantize_info;
RectangleInfo
geometry,
region_geometry;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (argc < 0)
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL);
quantize_info=AcquireQuantizeInfo(mogrify_info);
SetGeometryInfo(&geometry_info);
GetMagickPixelPacket(*image,&fill);
SetMagickPixelPacket(*image,&(*image)->background_color,(IndexPacket *) NULL,
&fill);
channel=mogrify_info->channel;
format=GetImageOption(mogrify_info,"format");
SetGeometry(*image,&region_geometry);
region_image=NewImageList();
/*
Transmogrify the image.
*/
for (i=0; i < (ssize_t) argc; i++)
{
Image
*mogrify_image;
ssize_t
count;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option),
0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
mogrify_image=(Image *) NULL;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
/*
Adaptive blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/*
Adaptive resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=AdaptiveResizeImage(*image,geometry.width,
geometry.height,exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
/*
Adaptive sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveSharpenImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
/*
Affine matrix.
*/
if (*option == '+')
{
GetAffineMatrix(&draw_info->affine);
break;
}
(void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
AlphaChannelType
alpha_type;
(void) SyncImageSettings(mogrify_info,*image);
alpha_type=(AlphaChannelType) ParseCommandOption(MagickAlphaOptions,
MagickFalse,argv[i+1]);
(void) SetImageAlphaChannel(*image,alpha_type);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
*text,
geometry[MaxTextExtent];
/*
Annotate image.
*/
(void) SyncImageSettings(mogrify_info,*image);
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
text=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (text == (char *) NULL)
break;
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&draw_info->geometry,geometry);
draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(*image,draw_info);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
draw_info->stroke_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
draw_info->text_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
{
/*
Auto Adjust Gamma of image based on its mean
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) AutoGammaImageChannel(*image,channel);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
/*
Perfectly Normalize (max/min stretch) the image
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) AutoLevelImageChannel(*image,channel);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=AutoOrientImage(*image,(*image)->orientation,
exception);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
/*
Black threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) BlackThresholdImageChannel(*image,channel,argv[i+1],
exception);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
/*
Blue shift image.
*/
(void) SyncImageSettings(mogrify_info,*image);
geometry_info.rho=1.5;
if (*option == '-')
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=BlurImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
/*
Surround image with a border of solid color.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=BorderImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase(MogrifyBorderColor,
&draw_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&draw_info->border_color,
exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
(void) QueryColorDatabase(argv[i+1],&draw_info->undercolor,
exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Brightness / contrast image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImageChannel(*image,channel,brightness,
contrast);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
/*
Detect edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
mogrify_image=CannyEdgeImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection;
/*
Color correct with a color decision list.
*/
(void) SyncImageSettings(mogrify_info,*image);
color_correction_collection=FileToString(argv[i+1],~0UL,exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(*image,color_correction_collection);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
channel=DefaultChannels;
else
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
/*
Charcoal image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=CharcoalImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
/*
Chop the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ChopImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
{
/*
Clamp image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ClampImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) SetImageClipMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
(void) ClipImage(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
CacheView
*mask_view;
Image
*mask_image;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
y;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
/*
Set the image mask.
FUTURE: this should be a SetImageAlphaChannel() call, or two.
*/
mask_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask_image == (Image *) NULL)
break;
if (SetImageStorageClass(mask_image,DirectClass) == MagickFalse)
return(MagickFalse);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
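/*
Fold the mask's alpha (or its intensity, when it has no alpha channel)
into the red, green, and blue channels so it acts as a grayscale mask.
*/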
for (y=0; y < (ssize_t) mask_image->rows; y++)
{
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) mask_image->columns; x++)
{
if (mask_image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(GetPixelIntensity(mask_image,
q)));
SetPixelRed(q,GetPixelOpacity(q));
SetPixelGreen(q,GetPixelOpacity(q));
SetPixelBlue(q,GetPixelOpacity(q));
q++;
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
break;
}
mask_view=DestroyCacheView(mask_view);
mask_image->matte=MagickTrue;
(void) SetImageClipMask(*image,mask_image);
mask_image=DestroyImage(mask_image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
/*
Colorize the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=ColorizeImage(*image,argv[i+1],draw_info->fill,
exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image);
kernel=AcquireKernelInfo(argv[i+1]);
if (kernel == (KernelInfo *) NULL)
break;
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/*
Reduce the number of colors in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
quantize_info->number_colors=StringToUnsignedLong(argv[i+1]);
if (quantize_info->number_colors == 0)
break;
if (((*image)->storage_class == DirectClass) ||
(*image)->colors > quantize_info->number_colors)
(void) QuantizeImage(quantize_info,*image);
else
(void) CompressImageColormap(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ColorspaceType
colorspace;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) TransformImageColorspace(*image,sRGBColorspace);
InheritException(exception,&(*image)->exception);
break;
}
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) TransformImageColorspace(*image,colorspace);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=ConnectedComponentsImage(*image,
StringToInteger(argv[i+1]),exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ContrastImage(*image,(*option == '-') ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
/*
Contrast stretch image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
white_point=(MagickRealType) (*image)->columns*(*image)->rows-
white_point;
(void) ContrastStretchImageChannel(*image,channel,black_point,
white_point);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
size_t
extent;
(void) SyncImageSettings(mogrify_info,*image);
kernel_info=AcquireKernelInfo(argv[i+1]);
if (kernel_info == (KernelInfo *) NULL)
break;
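/*
Normalize the kernel so its taps sum to 1.0, leaving overall brightness
unchanged; a near-zero sum falls back to a scale factor of 1.0.
*/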
extent=kernel_info->width*kernel_info->height;
gamma=0.0;
for (j=0; j < (ssize_t) extent; j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) extent; j++)
kernel_info->values[j]*=gamma;
mogrify_image=MorphologyImage(*image,CorrelateMorphology,1,
kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
/*
Crop an image to a smaller size.
*/
(void) SyncImageSettings(mogrify_info,*image);
#if 0
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (((geometry.width != 0) || (geometry.height != 0)) &&
((flags & XValue) == 0) && ((flags & YValue) == 0))
break;
#endif
#if 0
mogrify_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
mogrify_image->next = mogrify_image->previous = (Image *) NULL;
(void) TransformImage(&mogrify_image,argv[i+1],(char *) NULL);
InheritException(exception,&mogrify_image->exception);
#else
mogrify_image=CropImageToTiles(*image,argv[i+1],exception);
#endif
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
/*
Cycle an image colormap.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]));
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Decipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyDecipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
(void) CloneString(&draw_info->density,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH);
break;
}
(void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]));
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
/*
Straighten the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
threshold=40.0*QuantumRange/100.0;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=DeskewImage(*image,threshold,exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
/*
Reduce the speckles within an image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=DespeckleImage(*image,exception);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&draw_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
DistortImageMethod
method;
double
*arguments;
register ssize_t
x;
size_t
number_arguments;
/*
Distort image.
*/
(void) SyncImageSettings(mogrify_info,*image);
method=(DistortImageMethod) ParseCommandOption(MagickDistortOptions,
MagickFalse,argv[i+1]);
if (method == ResizeDistortion)
{
double
resize_args[2];
/*
Resize distortion.
*/
(void) ParseRegionGeometry(*image,argv[i+2],&geometry,
exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
mogrify_image=DistortImage(*image,method,(size_t) 2,
resize_args,MagickTrue,exception);
break;
}
args=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (args == (char *) NULL)
break;
p=(char *) args;
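/* First pass: count the comma-separated tokens to size the argument array. */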
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(arguments,0,number_arguments*sizeof(*arguments));
p=(char *) args;
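/* Second pass: convert each token to a double distortion argument. */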
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
mogrify_image=DistortImage(*image,method,number_arguments,arguments,
(*option == '+') ? MagickTrue : MagickFalse,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
if (quantize_info->dither_method == NoDitherMethod)
quantize_info->dither=MagickFalse;
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
/*
Draw image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CloneString(&draw_info->primitive,argv[i+1]);
(void) DrawImage(*image,draw_info);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
/*
Enhance edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EdgeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
/*
Gaussian emboss image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EmbossImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Encipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&draw_info->encoding,argv[i+1]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
/*
Enhance image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=EnhanceImage(*image,exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
/*
Equalize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) EqualizeImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*image);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+
1.0);
(void) EvaluateImageChannel(*image,channel,op,constant,exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
/*
Set the image extent.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (geometry.width == 0)
geometry.width=(*image)->columns;
if (geometry.height == 0)
geometry.height=(*image)->rows;
mogrify_image=ExtentImage(*image,&geometry,exception);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
break;
}
(void) CloneString(&draw_info->family,argv[i+1]);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:features");
break;
}
(void) SetImageArtifact(*image,"identify:features",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
ExceptionInfo
*sans;
GetMagickPixelPacket(*image,&fill);
if (*option == '+')
{
(void) QueryMagickColor("none",&fill,exception);
(void) QueryColorDatabase("none",&draw_info->fill,exception);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
sans=AcquireExceptionInfo();
(void) QueryMagickColor(argv[i+1],&fill,sans);
status=QueryColorDatabase(argv[i+1],&draw_info->fill,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("flip",option+1) == 0)
{
/*
Flip image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=FlipImage(*image,exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
MagickPixelPacket
target;
/*
Floodfill image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) QueryMagickColor(argv[i+2],&target,exception);
(void) FloodfillPaintImage(*image,channel,draw_info,&target,
geometry.x,geometry.y,*option == '-' ? MagickFalse : MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
/*
Flop image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=FlopImage(*image,exception);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
break;
}
(void) CloneString(&draw_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
format=argv[i+1];
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
/*
Surround image with an ornamental border.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=(*image)->columns+2*frame_info.width;
frame_info.height=(*image)->rows+2*frame_info.height;
mogrify_image=FrameImage(*image,&frame_info,exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
char
*arguments,
token[MaxTextExtent];
const char
*p;
double
*parameters;
MagickFunction
function;
register ssize_t
x;
size_t
number_parameters;
/*
Function Modify Image Values
*/
(void) SyncImageSettings(mogrify_info,*image);
function=(MagickFunction) ParseCommandOption(MagickFunctionOptions,
MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (arguments == (char *) NULL)
break;
p=(char *) arguments;
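/*
Count the comma-separated tokens, then (in the second pass below) convert
each one to a double function parameter.
*/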
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_parameters=(size_t) x;
parameters=(double *) AcquireQuantumMemory(number_parameters,
sizeof(*parameters));
if (parameters == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(parameters,0,number_parameters*
sizeof(*parameters));
p=(char *) arguments;
for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
parameters[x]=StringToDouble(token,(char **) NULL);
}
arguments=DestroyString(arguments);
(void) FunctionImageChannel(*image,channel,function,
number_parameters,parameters,exception);
parameters=(double *) RelinquishMagickMemory(parameters);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
/*
Gamma image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
(*image)->gamma=StringToDouble(argv[i+1],(char **) NULL);
else
{
if (strchr(argv[i+1],',') != (char *) NULL)
(void) GammaImage(*image,argv[i+1]);
else
(void) GammaImageChannel(*image,channel,
StringToDouble(argv[i+1],(char **) NULL));
InheritException(exception,&(*image)->exception);
}
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=GaussianBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record Image offset, Resize last image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
if ((*image)->geometry != (char *) NULL)
(*image)->geometry=DestroyString((*image)->geometry);
break;
}
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&(*image)->geometry,argv[i+1]);
else
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
draw_info->gravity=UndefinedGravity;
break;
}
draw_info->gravity=(GravityType) ParseCommandOption(
MagickGravityOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
PixelIntensityMethod
method;
(void) SyncImageSettings(mogrify_info,*image);
method=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,argv[i+1]);
(void) GrayscaleImage(*image,method);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]);
break;
}
if (LocaleCompare("hough-lines",option+1) == 0)
{
/*
Identify lines in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
char
*text;
(void) SyncImageSettings(mogrify_info,*image);
if (format == (char *) NULL)
{
(void) IdentifyImage(*image,stdout,mogrify_info->verbose);
InheritException(exception,&(*image)->exception);
break;
}
text=InterpretImageProperties(mogrify_info,*image,format);
InheritException(exception,&(*image)->exception);
if (text == (char *) NULL)
break;
(void) fputs(text,stdout);
text=DestroyString(text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
/*
Implode image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=ImplodeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interline_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interword_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/*
Resize image using 'point sampled' interpolation
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=InterpolativeResizeImage(*image,geometry.width,
geometry.height,(*image)->interpolate,exception);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->kerning=geometry_info.rho;
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
mogrify_image=KuwaharaImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
/*
Local adaptive threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=AdaptiveThresholdImage(*image,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,(ssize_t)
geometry_info.xi,exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
MagickRealType
black_point,
gamma,
white_point;
MagickStatusType
flags;
/*
Parse levels.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(MagickRealType) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(MagickRealType) (QuantumRange/100.0);
white_point*=(MagickRealType) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(MagickRealType) QuantumRange-black_point;
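/*
'+level' (or the '!' geometry flag, AspectValue) applies the inverse
operation: LevelizeImageChannel() maps the black and white points back
onto the full quantum range, undoing a prior LevelImageChannel().
*/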
if ((*option == '+') || ((flags & AspectValue) != 0))
(void) LevelizeImageChannel(*image,channel,black_point,
white_point,gamma);
else
(void) LevelImageChannel(*image,channel,black_point,white_point,
gamma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MaxTextExtent];
const char
*p;
MagickPixelPacket
black_point,
white_point;
p=(const char *) argv[i+1];
GetNextToken(p,&p,MaxTextExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryMagickColor(token,&black_point,exception);
else
(void) QueryMagickColor("#000000",&black_point,exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MaxTextExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryMagickColor(token,&white_point,exception);
else
(void) QueryMagickColor("#ffffff",&white_point,exception);
}
(void) LevelColorsImageChannel(*image,channel,&black_point,
&white_point,*option == '+' ? MagickTrue : MagickFalse);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(MagickRealType) (*image)->columns*(*image)->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(MagickRealType) (*image)->columns*(*image)->rows-
black_point;
(void) LinearStretchImage(*image,black_point,white_point);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("linewidth",option+1) == 0)
{
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/*
Liquid rescale image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
mogrify_image=LiquidRescaleImage(*image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
MagickStatusType
flags;
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
mogrify_image=LocalContrastImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
/*
Double image size.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=MagnifyImage(*image,exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image);
InheritException(exception,&(*image)->exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,mask);
mask=DestroyImage(mask);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
(void) SetImageAlphaChannel(*image,(*option == '-') ?
SetAlphaChannel : DeactivateAlphaChannel );
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
/*
Delineate arbitrarily shaped clusters in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
/*
Median filter image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,MedianStatistic,
(size_t) geometry_info.rho,(size_t) geometry_info.rho,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/*
Mode image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,ModeStatistic,
(size_t) geometry_info.rho,(size_t) geometry_info.rho,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ModulateImage(*image,argv[i+1]);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:moments");
break;
}
(void) SetImageArtifact(*image,"identify:moments",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageProgressMonitor(*image,
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
(void) SetImageProgressMonitor(*image,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) SetImageType(*image,BilevelType);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MaxTextExtent];
const char
*p;
KernelInfo
*kernel;
MorphologyMethod
method;
ssize_t
iterations;
/*
Morphological Image Operation
*/
(void) SyncImageSettings(mogrify_info,*image);
p=argv[i+1];
GetNextToken(p,&p,MaxTextExtent,token);
method=(MorphologyMethod) ParseCommandOption(
MagickMorphologyOptions,MagickFalse,token);
iterations=1L;
GetNextToken(p,&p,MaxTextExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MaxTextExtent,token);
if ((*p != '\0'))
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(argv[i+2]);
if (kernel == (KernelInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnabletoParseKernel","morphology");
status=MagickFalse;
break;
}
mogrify_image=MorphologyImageChannel(*image,channel,method,
iterations,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("motion-blur",option+1) == 0)
{
/*
Motion blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=MotionBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception);
break;
}
break;
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) NegateImageChannel(*image,channel,*option == '+' ?
MagickTrue : MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '-')
{
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImageChannel(*image,channel,
NonpeakStatistic,(size_t) geometry_info.rho,(size_t)
geometry_info.sigma,exception);
}
else
{
NoiseType
noise;
noise=(NoiseType) ParseCommandOption(MagickNoiseOptions,
MagickFalse,argv[i+1]);
mogrify_image=AddNoiseImageChannel(*image,channel,noise,
exception);
}
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) NormalizeImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
MagickPixelPacket
target;
(void) SyncImageSettings(mogrify_info,*image);
(void) QueryMagickColor(argv[i+1],&target,exception);
(void) OpaquePaintImageChannel(*image,channel,&target,&fill,
*option == '-' ? MagickFalse : MagickTrue);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) OrderedPosterizeImageChannel(*image,channel,argv[i+1],
exception);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=OilPaintImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&draw_info->fill,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&draw_info->fill,exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
/*
Perceptible image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) PerceptibleImageChannel(*image,channel,StringToDouble(
argv[i+1],(char **) NULL));
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("12",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
double
angle;
RandomInfo
*random_info;
/*
Simulate a Polaroid picture.
*/
(void) SyncImageSettings(mogrify_info,*image);
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
if (*option == '-')
{
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
angle=geometry_info.rho;
}
mogrify_image=PolaroidImage(*image,draw_info,angle,exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
/*
Posterize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]),
quantize_info->dither);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
PreviewType
preview_type;
/*
Preview image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
preview_type=UndefinedPreview;
else
preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
mogrify_image=PreviewImage(*image,preview_type,exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a profile from the image.
*/
(void) ProfileImage(*image,argv[i+1],(const unsigned char *)
NULL,0,MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
/*
Associate a profile with the image.
*/
profile_info=CloneImageInfo(mogrify_info);
profile=GetImageProfile(*image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,argv[i+1],exception);
profile_info=DestroyImageInfo(profile_info);
if (profile_image == (Image *) NULL)
{
StringInfo
*profile;
profile_info=CloneImageInfo(mogrify_info);
(void) CopyMagickString(profile_info->filename,argv[i+1],
MaxTextExtent);
profile=FileToStringInfo(profile_info->filename,~0UL,exception);
if (profile != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,exception);
(void) ProfileImage(*image,profile_info->magick,
GetStringInfoDatum(profile),(size_t)
GetStringInfoLength(profile),MagickFalse);
profile=DestroyStringInfo(profile);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(*image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),MagickFalse);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quantize",option+1) == 0)
{
if (*option == '+')
{
quantize_info->colorspace=UndefinedColorspace;
break;
}
quantize_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("radial-blur",option+1) == 0 ||
LocaleCompare("rotational-blur",option+1) == 0)
{
/*
Radial blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=RotationalBlurImageChannel(*image,channel,
StringToDouble(argv[i+1],(char **) NULL),exception);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
/*
Surround image with a raise of solid color.
*/
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) RandomThresholdImageChannel(*image,channel,argv[i+1],
exception);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image);
kernel=AcquireKernelInfo(argv[i+1]);
if (kernel == (KernelInfo *) NULL)
break;
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("region",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (region_image != (Image *) NULL)
{
/*
Composite region.
*/
(void) CompositeImage(region_image,region_image->matte !=
MagickFalse ? CopyCompositeOp : OverCompositeOp,*image,
region_geometry.x,region_geometry.y);
                InheritException(exception,&region_image->exception);
*image=DestroyImage(*image);
*image=region_image;
region_image=(Image *) NULL;
}
if (*option == '+')
break;
/*
Apply transformations to a selected region of the image.
*/
            (void) ParseGravityGeometry(*image,argv[i+1],&region_geometry,
exception);
            mogrify_image=CropImage(*image,&region_geometry,exception);
if (mogrify_image == (Image *) NULL)
break;
region_image=(*image);
*image=mogrify_image;
mogrify_image=(Image *) NULL;
break;
}
if (LocaleCompare("render",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
draw_info->render=(*option == '+') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image);
InheritException(exception,&(*image)->exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
{
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
break;
}
(void) ResetImagePage(*image,argv[i+1]);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/*
Resample image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ResampleImage(*image,geometry_info.rho,
geometry_info.sigma,(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
/*
Roll image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) (*image)->columns/100.0;
geometry.y*=(double) (*image)->rows/100.0;
}
mogrify_image=RollImage(*image,geometry.x,geometry.y,exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
char
*geometry;
/*
Check for conditional image rotation.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (strchr(argv[i+1],'>') != (char *) NULL)
if ((*image)->columns <= (*image)->rows)
break;
if (strchr(argv[i+1],'<') != (char *) NULL)
if ((*image)->columns >= (*image)->rows)
break;
/*
Rotate image.
*/
geometry=ConstantString(argv[i+1]);
(void) SubstituteString(&geometry,">","");
(void) SubstituteString(&geometry,"<","");
(void) ParseGeometry(geometry,&geometry_info);
geometry=DestroyString(geometry);
mogrify_image=RotateImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/*
Sample image with pixel replication.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SampleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ScaleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
/*
Selectively blur pixels within a contrast threshold.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=SelectiveBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/*
Break channels into separate images.
WARNING: This can generate multiple images!
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=SeparateImages(*image,channel,exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
double
threshold;
/*
Sepia-tone image.
*/
(void) SyncImageSettings(mogrify_info,*image);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=SepiaToneImage(*image,threshold,exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
/*
Segment image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(*image,(*image)->colorspace,
mogrify_info->verbose,geometry_info.rho,geometry_info.sigma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
char
*value;
/*
Set image option.
*/
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) DeleteImageOption(mogrify_info,argv[i+1]+7);
(void) DeleteImageArtifact(*image,argv[i+1]+7);
}
else
(void) DeleteImageProperty(*image,argv[i+1]);
break;
}
value=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (value == (char *) NULL)
break;
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value,
exception);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) SetImageOption(image_info,argv[i+1]+7,value);
(void) SetImageOption(mogrify_info,argv[i+1]+7,value);
(void) SetImageArtifact(*image,argv[i+1]+7,value);
}
else
(void) SetImageProperty(*image,argv[i+1],value);
value=DestroyString(value);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
/*
Shade image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
/*
Shadow image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
mogrify_image=ShadowImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
/*
Sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SharpenImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
/*
Shave the image edges.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ShaveImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
/*
Shear image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ShearImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
/*
Sigmoidal non-linearity contrast control.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
(void) SigmoidalContrastImageChannel(*image,channel,
(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,
geometry_info.sigma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
/*
Sketch image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SketchImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
double
threshold;
(void) SyncImageSettings(mogrify_info,*image);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) SolarizeImageChannel(*image,channel,threshold,exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
SparseColorMethod
method;
char
*arguments;
/*
Sparse Color Interpolated Gradient
*/
(void) SyncImageSettings(mogrify_info,*image);
method=(SparseColorMethod) ParseCommandOption(
MagickSparseColorOptions,MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (arguments == (char *) NULL)
break;
mogrify_image=SparseColorOption(*image,channel,method,arguments,
option[0] == '+' ? MagickTrue : MagickFalse,exception);
arguments=DestroyString(arguments);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
/*
Splice a solid color into the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SpliceImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
/*
Spread an image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SpreadImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
StatisticType
type;
(void) SyncImageSettings(mogrify_info,*image);
type=(StatisticType) ParseCommandOption(MagickStatisticOptions,
MagickFalse,argv[i+1]);
(void) ParseGeometry(argv[i+2],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,type,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
if (*option == '+')
{
draw_info->stretch=UndefinedStretch;
break;
}
draw_info->stretch=(StretchType) ParseCommandOption(
MagickStretchOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
/*
Strip image of profiles and comments.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) StripImage(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
ExceptionInfo
*sans;
if (*option == '+')
{
(void) QueryColorDatabase("none",&draw_info->stroke,exception);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(
draw_info->stroke_pattern);
break;
}
sans=AcquireExceptionInfo();
status=QueryColorDatabase(argv[i+1],&draw_info->stroke,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
{
draw_info->style=UndefinedStyle;
break;
}
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
/*
Swirl image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SwirlImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
threshold=(double) QuantumRange/2;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) BilevelImageChannel(*image,channel,threshold);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
/*
Thumbnail image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
/*
Tint the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TintImage(*image,argv[i+1],draw_info->fill,exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
/*
Affine transform image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=AffineTransformImage(*image,&draw_info->affine,
exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
MagickPixelPacket
target;
(void) SyncImageSettings(mogrify_info,*image);
(void) QueryMagickColor(argv[i+1],&target,exception);
(void) TransparentPaintImage(*image,&target,(Quantum)
TransparentOpacity,*option == '-' ? MagickFalse : MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
/*
Transpose image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TransposeImage(*image,exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
/*
Transverse image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TransverseImage(*image,exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
/*
Trim image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TrimImage(*image,exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
ImageType
type;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
type=UndefinedType;
else
type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
argv[i+1]);
(*image)->type=UndefinedType;
(void) SetImageType(*image,type);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
(void) QueryColorDatabase(argv[i+1],&draw_info->undercolor,
exception);
break;
}
if (LocaleCompare("unique",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(*image,"identify:unique-colors","true");
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
/*
Unique image colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=UniqueImageColors(*image,exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
/*
Unsharp mask image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
mogrify_image=UnsharpMaskImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,
geometry_info.psi,exception);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
(void) SetImageArtifact(*image,option+1,
*option == '+' ? "false" : "true");
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
/*
Vignette image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*(*image)->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*(*image)->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) (*image)->columns/100.0;
geometry_info.psi*=(double) (*image)->rows/100.0;
}
mogrify_image=VignetteImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageVirtualPixelMethod(*image,
UndefinedVirtualPixelMethod);
break;
}
(void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]));
break;
}
break;
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
/*
Wave image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=WaveImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
/*
Wavelet denoise image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,
argv[i+1]);
if (weight == -1)
weight=StringToUnsignedLong(argv[i+1]);
draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
/*
White threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) WhiteThresholdImageChannel(*image,channel,argv[i+1],
exception);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
default:
break;
}
/*
Replace current image with any image that was generated.
*/
if (mogrify_image != (Image *) NULL)
ReplaceImageInListReturnLast(image,mogrify_image);
i+=count;
}
if (region_image != (Image *) NULL)
{
/*
Composite transformed region onto image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CompositeImage(region_image,region_image->matte != MagickFalse ?
CopyCompositeOp : OverCompositeOp,*image,region_geometry.x,
region_geometry.y);
      InheritException(exception,&region_image->exception);
*image=DestroyImage(*image);
*image=region_image;
region_image = (Image *) NULL;
}
/*
Free resources.
*/
quantize_info=DestroyQuantizeInfo(quantize_info);
draw_info=DestroyDrawInfo(draw_info);
mogrify_info=DestroyImageInfo(mogrify_info);
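  /*
    Collapse to a boolean result: any exception at error severity or above
    counts as failure.
  */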
status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageCommand() transforms an image or a sequence of images. These
% transforms include image scaling, image rotation, color reduction, and
% others. The transmogrified image overwrites the original image.
%
% The format of the MogrifyImageCommand method is:
%
% MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc,
%        char **argv,char **metadata,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: A text array containing the command line arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
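/*
  Illustrative calling sketch (not part of the library; the file name and
  options below are hypothetical).  MogrifyImageCommand() expects a full
  argv-style vector, including the program name; in practice it is usually
  invoked through the MagickCommandGenesis() entry point:

    ImageInfo
      *image_info;

    ExceptionInfo
      *exception;

    char
      *args[] = { "mogrify", "-resize", "50%", "image.png" };

    image_info=AcquireImageInfo();
    exception=AcquireExceptionInfo();
    (void) MogrifyImageCommand(image_info,4,args,(char **) NULL,exception);
    exception=DestroyExceptionInfo(exception);
    image_info=DestroyImageInfo(image_info);
*/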
static MagickBooleanType MogrifyUsage(void)
{
static const char
miscellaneous[] =
" -debug events display copious debugging information\n"
" -distribute-cache port\n"
" distributed pixel cache spanning one or more servers\n"
" -help print program options\n"
" -list type print a list of supported option arguments\n"
" -log format format of debugging information\n"
" -version print version information",
operators[] =
" -adaptive-blur geometry\n"
" adaptively blur pixels; decrease effect near edges\n"
" -adaptive-resize geometry\n"
" adaptively resize image using 'mesh' interpolation\n"
" -adaptive-sharpen geometry\n"
" adaptively sharpen pixels; increase effect near edges\n"
" -alpha option on, activate, off, deactivate, set, opaque, copy\n"
" transparent, extract, background, or shape\n"
" -annotate geometry text\n"
" annotate the image with text\n"
" -auto-gamma automagically adjust gamma level of image\n"
" -auto-level automagically adjust color levels of image\n"
" -auto-orient automagically orient (rotate) image\n"
" -bench iterations measure performance\n"
" -black-threshold value\n"
" force all pixels below the threshold into black\n"
" -blue-shift simulate a scene at nighttime in the moonlight\n"
" -blur geometry reduce image noise and reduce detail levels\n"
" -border geometry surround image with a border of color\n"
" -bordercolor color border color\n"
" -brightness-contrast geometry\n"
" improve brightness / contrast of the image\n"
" -canny geometry detect edges in the image\n"
" -cdl filename color correct with a color decision list\n"
" -charcoal radius simulate a charcoal drawing\n"
" -chop geometry remove pixels from the image interior\n"
" -clamp keep pixel values in range (0-QuantumRange)\n"
" -clip clip along the first path from the 8BIM profile\n"
" -clip-mask filename associate a clip mask with the image\n"
" -clip-path id clip along a named path from the 8BIM profile\n"
" -colorize value colorize the image with the fill color\n"
" -color-matrix matrix apply color correction to the image\n"
" -connected-components connectivity\n"
" connected-components uniquely labeled\n"
" -contrast enhance or reduce the image contrast\n"
" -contrast-stretch geometry\n"
" improve contrast by `stretching' the intensity range\n"
" -convolve coefficients\n"
" apply a convolution kernel to the image\n"
" -cycle amount cycle the image colormap\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -deskew threshold straighten an image\n"
" -despeckle reduce the speckles within an image\n"
" -distort method args\n"
" distort images according to given method ad args\n"
" -draw string annotate the image with a graphic primitive\n"
" -edge radius apply a filter to detect edges in the image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -emboss radius emboss an image\n"
" -enhance apply a digital filter to enhance a noisy image\n"
" -equalize perform histogram equalization to an image\n"
" -evaluate operator value\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -extent geometry set the image size\n"
" -extract geometry extract area from image\n"
" -hough-lines geometry\n"
" identify lines in the image\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fft implements the discrete Fourier transform (DFT)\n"
" -flip flip image vertically\n"
" -floodfill geometry color\n"
" floodfill the image with color\n"
" -flop flop image horizontally\n"
" -frame geometry surround image with an ornamental border\n"
" -function name parameters\n"
" apply function over image values\n"
" -gamma value level of gamma correction\n"
" -gaussian-blur geometry\n"
" reduce image noise and reduce detail levels\n"
" -geometry geometry preferred size or location of the image\n"
" -grayscale method convert image to grayscale\n"
" -help print program options\n"
" -identify identify the format and characteristics of the image\n"
" -ift implements the inverse discrete Fourier transform (DFT)\n"
" -implode amount implode image pixels about the center\n"
" -kuwahara geometry edge preserving noise reduction filter\n"
" -lat geometry local adaptive thresholding\n"
" -layers method optimize, merge, or compare image layers\n"
" -level value adjust the level of image contrast\n"
" -level-colors color,color\n"
" level image with the given colors\n"
" -linear-stretch geometry\n"
" improve contrast by `stretching with saturation'\n"
" -liquid-rescale geometry\n"
" rescale image with seam-carving\n"
" -local-contrast geometry\n"
" enhance local contrast\n"
" -magnify double the size of the image with pixel art scaling\n"
" -mean-shift geometry delineate arbitrarily shaped clusters in the image\n"
" -median geometry apply a median filter to the image\n"
" -mode geometry make each pixel the 'predominant color' of the\n"
" neighborhood\n"
" -modulate value vary the brightness, saturation, and hue\n"
" -monochrome transform image to black and white\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -motion-blur geometry\n"
" simulate motion blur\n"
" -negate replace every pixel with its complementary color \n"
" -noise geometry add or reduce noise in an image\n"
" -normalize transform image to span the full range of colors\n"
" -opaque color change this color to the fill color\n"
" -ordered-dither NxN\n"
" add a noise pattern to the image with specific\n"
" amplitudes\n"
" -paint radius simulate an oil painting\n"
" -perceptible epsilon\n"
" pixel value less than |epsilon| become epsilon or\n"
" -epsilon\n"
" -polaroid angle simulate a Polaroid picture\n"
" -posterize levels reduce the image to a limited number of color levels\n"
" -profile filename add, delete, or apply an image profile\n"
" -quantize colorspace reduce colors in this colorspace\n"
" -radial-blur angle radial blur the image\n"
" -raise value lighten/darken image edges to create a 3-D effect\n"
" -random-threshold low,high\n"
" random threshold the image\n"
" -region geometry apply options to a portion of the image\n"
" -render render vector graphics\n"
" -resample geometry change the resolution of an image\n"
" -resize geometry resize the image\n"
" -roll geometry roll an image vertically or horizontally\n"
" -rotate degrees apply Paeth rotation to the image\n"
" -sample geometry scale image with pixel sampling\n"
" -scale geometry scale the image\n"
" -segment values segment an image\n"
" -selective-blur geometry\n"
" selectively blur pixels within a contrast threshold\n"
" -sepia-tone threshold\n"
" simulate a sepia-toned photo\n"
" -set property value set an image property\n"
" -shade degrees shade the image using a distant light source\n"
" -shadow geometry simulate an image shadow\n"
" -sharpen geometry sharpen the image\n"
" -shave geometry shave pixels from the image edges\n"
" -shear geometry slide one edge of the image along the X or Y axis\n"
" -sigmoidal-contrast geometry\n"
" increase the contrast without saturating highlights or\n"
" shadows\n"
" -sketch geometry simulate a pencil sketch\n"
" -solarize threshold negate all pixels above the threshold level\n"
" -sparse-color method args\n"
" fill in a image based on a few color points\n"
" -splice geometry splice the background color into the image\n"
" -spread radius displace image pixels by a random amount\n"
" -statistic type radius\n"
" replace each pixel with corresponding statistic from the neighborhood\n"
" -strip strip image of all profiles and comments\n"
" -swirl degrees swirl image pixels about the center\n"
" -threshold value threshold the image\n"
" -thumbnail geometry create a thumbnail of the image\n"
" -tile filename tile image when filling a graphic primitive\n"
" -tint value tint the image with the fill color\n"
" -transform affine transform image\n"
" -transparent color make this color transparent within the image\n"
" -transpose flip image vertically and rotate 90 degrees\n"
" -transverse flop image horizontally and rotate 270 degrees\n"
" -trim trim image edges\n"
" -type type image type\n"
" -unique-colors discard all but one of any pixel color\n"
" -unsharp geometry sharpen the image\n"
" -vignette geometry soften the edges of the image in vignette style\n"
" -wave geometry alter an image along a sine wave\n"
" -wavelet-denoise threshold\n"
" removes noise from the image using a wavelet transform\n"
" -white-threshold value\n"
" force all pixels above the threshold into white",
sequence_operators[] =
" -affinity filename transform image colors to match this set of colors\n"
" -append append an image sequence\n"
" -clut apply a color lookup table to the image\n"
" -coalesce merge a sequence of images\n"
" -combine combine a sequence of images\n"
" -compare mathematically and visually annotate the difference between an image and its reconstruction\n"
" -complex operator perform complex mathematics on an image sequence\n"
" -composite composite image\n"
" -copy geometry offset\n"
" copy pixels from one area of an image to another\n"
" -crop geometry cut out a rectangular region of the image\n"
" -deconstruct break down an image sequence into constituent parts\n"
" -evaluate-sequence operator\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -flatten flatten a sequence of images\n"
" -fx expression apply mathematical expression to an image channel(s)\n"
" -hald-clut apply a Hald color lookup table to the image\n"
" -layers method optimize, merge, or compare image layers\n"
" -morph value morph an image sequence\n"
" -mosaic create a mosaic from an image sequence\n"
" -poly terms build a polynomial from the image sequence and the corresponding\n"
" terms (coefficients and degree pairs).\n"
" -print string interpret string and print to console\n"
" -process arguments process the image with a custom image filter\n"
" -separate separate an image channel into a grayscale image\n"
" -smush geometry smush an image sequence together\n"
" -write filename write images to this file",
settings[] =
" -adjoin join images into a single multi-image file\n"
" -affine matrix affine transform matrix\n"
" -alpha option activate, deactivate, reset, or set the alpha channel\n"
" -antialias remove pixel-aliasing\n"
" -authenticate password\n"
" decipher image with this password\n"
" -attenuate value lessen (or intensify) when adding noise to an image\n"
" -background color background color\n"
" -bias value add bias when convolving an image\n"
" -black-point-compensation\n"
" use black point compensation\n"
" -blue-primary point chromaticity blue primary point\n"
" -bordercolor color border color\n"
" -caption string assign a caption to an image\n"
" -cdl filename color correct with a color decision list\n"
" -channel type apply option to select image channels\n"
" -colors value preferred number of colors in the image\n"
" -colorspace type alternate image colorspace\n"
" -comment string annotate image with comment\n"
" -compose operator set image composite operator\n"
" -compress type type of pixel compression when writing the image\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -define format:option\n"
" define one or more image format options\n"
" -delay value display the next image after pausing\n"
" -density geometry horizontal and vertical density of the image\n"
" -depth value image depth\n"
" -direction type render text right-to-left or left-to-right\n"
" -display server get image or font from this X server\n"
" -dispose method layer disposal method\n"
" -dither method apply error diffusion to image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -encoding type text encoding type\n"
" -endian type endianness (MSB or LSB) of the image\n"
" -family name render text with this font family\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fill color color to use when filling a graphic primitive\n"
" -filter type use this filter when resizing an image\n"
" -flatten flatten a sequence of images\n"
" -font name render text with this font\n"
" -format \"string\" output formatted image characteristics\n"
" -function name apply a function to the image\n"
" -fuzz distance colors within this distance are considered equal\n"
" -gravity type horizontal and vertical text placement\n"
" -green-primary point chromaticity green primary point\n"
" -intensity method method to generate intensity value from pixel\n"
" -intent type type of rendering intent when managing the image color\n"
" -interlace type type of image interlacing scheme\n"
" -interline-spacing value\n"
" set the space between two text lines\n"
" -interpolate method pixel color interpolation method\n"
" -interword-spacing value\n"
" set the space between two words\n"
" -kerning value set the space between two letters\n"
" -label string assign a label to an image\n"
" -limit type value pixel cache resource limit\n"
" -loop iterations add Netscape loop extension to your GIF animation\n"
" -mask filename associate a mask with the image\n"
" -matte store matte channel if the image has one\n"
" -mattecolor color frame color\n"
" -monitor monitor progress\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -orient type image orientation\n"
" -page geometry size and location of an image canvas (setting)\n"
" -path path write images to this path on disk\n"
" -ping efficiently determine image attributes\n"
" -pointsize value font point size\n"
" -precision value maximum number of significant digits to print\n"
" -preview type image preview type\n"
" -quality value JPEG/MIFF/PNG compression level\n"
" -quiet suppress all warning messages\n"
" -red-primary point chromaticity red primary point\n"
" -regard-warnings pay attention to warning messages\n"
" -remap filename transform image colors to match this set of colors\n"
" -repage geometry size and location of an image canvas\n"
" -respect-parentheses settings remain in effect until parenthesis boundary\n"
" -sampling-factor geometry\n"
" horizontal and vertical sampling factor\n"
" -scene value image scene number\n"
" -seed value seed a new sequence of pseudo-random numbers\n"
" -size geometry width and height of image\n"
" -stretch type render text with this font stretch\n"
" -stroke color graphic primitive stroke color\n"
" -strokewidth value graphic primitive stroke width\n"
" -style type render text with this font style\n"
" -synchronize synchronize image to storage device\n"
" -taint declare the image as modified\n"
" -texture filename name of texture to tile onto the image background\n"
" -tile-offset geometry\n"
" tile offset\n"
" -treedepth value color tree depth\n"
" -transparent-color color\n"
" transparent color\n"
" -undercolor color annotation bounding box color\n"
" -units type the units of image resolution\n"
" -verbose print detailed information about the image\n"
" -view FlashPix viewing transforms\n"
" -virtual-pixel method\n"
" virtual pixel access method\n"
" -weight type render text with this font weight\n"
" -white-point point chromaticity white point",
stack_operators[] =
" -delete indexes delete the image from the image sequence\n"
" -duplicate count,indexes\n"
" duplicate an image one or more times\n"
" -insert index insert last image into the image sequence\n"
" -reverse reverse image sequence\n"
" -swap indexes swap two images in the image sequence";
ListMagickVersion(stdout);
(void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n",
GetClientName());
(void) printf("\nImage Settings:\n");
(void) puts(settings);
(void) printf("\nImage Operators:\n");
(void) puts(operators);
(void) printf("\nImage Sequence Operators:\n");
(void) puts(sequence_operators);
(void) printf("\nImage Stack Operators:\n");
(void) puts(stack_operators);
(void) printf("\nMiscellaneous Options:\n");
(void) puts(miscellaneous);
(void) printf(
"\nBy default, the image format of `file' is determined by its magic\n");
(void) printf(
"number. To specify a particular image format, precede the filename\n");
(void) printf(
"with an image format name and a colon (i.e. ps:image) or specify the\n");
(void) printf(
"image type as the filename suffix (i.e. image.ps). Specify 'file' as\n");
(void) printf("'-' for standard input or output.\n");
return(MagickFalse);
}
WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,
int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception)
{
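  /*
    Local helper macros:  DestroyMogrify() releases everything this routine
    allocates (the expanded argv vector, the image stack, and the format and
    path strings); the Throw*() macros log an error to `exception', clean up,
    and return MagickFalse.
  */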
#define DestroyMogrify() \
{ \
if (format != (char *) NULL) \
format=DestroyString(format); \
if (path != (char *) NULL) \
path=DestroyString(path); \
DestroyImageStack(); \
for (i=0; i < (ssize_t) argc; i++) \
argv[i]=DestroyString(argv[i]); \
argv=(char **) RelinquishMagickMemory(argv); \
}
#define ThrowMogrifyException(asperity,tag,option) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \
option); \
DestroyMogrify(); \
return(MagickFalse); \
}
#define ThrowMogrifyInvalidArgumentException(option,argument) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),OptionError, \
"InvalidArgument","`%s': %s",argument,option); \
DestroyMogrify(); \
return(MagickFalse); \
}
char
*format,
*option,
*path;
Image
*image;
ImageStack
image_stack[MaxImageStackDepth+1];
MagickBooleanType
global_colormap;
MagickBooleanType
fire,
pend,
respect_parenthesis;
MagickStatusType
status;
register ssize_t
i;
ssize_t
j,
k;
wand_unreferenced(metadata);
/*
Set defaults.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(exception != (ExceptionInfo *) NULL);
if (argc == 2)
{
option=argv[1];
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
return(MagickTrue);
}
}
if (argc < 2)
return(MogrifyUsage());
format=(char *) NULL;
path=(char *) NULL;
global_colormap=MagickFalse;
k=0;
j=1;
NewImageStack();
option=(char *) NULL;
pend=MagickFalse;
respect_parenthesis=MagickFalse;
status=MagickTrue;
/*
Parse command line.
*/
ReadCommandlLine(argc,&argv);
status=ExpandFilenames(&argc,&argv);
if (status == MagickFalse)
ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
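  /*
    Walk the expanded argument vector: parentheses push and pop the image
    stack, a bare filename is read, transformed, and written back in place,
    and every other token is validated as an option below.
  */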
for (i=1; i < (ssize_t) argc; i++)
{
option=argv[i];
if (LocaleCompare(option,"(") == 0)
{
FireImageStack(MagickFalse,MagickTrue,pend);
if (k == MaxImageStackDepth)
ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply",
option);
PushImageStack();
continue;
}
if (LocaleCompare(option,")") == 0)
{
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
if (k == 0)
ThrowMogrifyException(OptionError,"UnableToParseExpression",option);
PopImageStack();
continue;
}
if (IsCommandOption(option) == MagickFalse)
{
char
backup_filename[MaxTextExtent],
*filename;
Image
*images;
struct stat
properties;
/*
Option is a file name: begin by reading image from specified file.
*/
FireImageStack(MagickFalse,MagickFalse,pend);
filename=argv[i];
if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1)))
filename=argv[++i];
(void) SetImageOption(image_info,"filename",filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
images=ReadImages(image_info,exception);
status&=(images != (Image *) NULL) &&
(exception->severity < ErrorException);
if (images == (Image *) NULL)
continue;
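      /*
        Remember the source file's stat() properties so its access and
        modification times can be restored after the rewrite (see the
        preserve-timestamp handling below).
      */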
properties=(*GetBlobProperties(images));
if (format != (char *) NULL)
(void) CopyMagickString(images->filename,images->magick_filename,
MaxTextExtent);
if (path != (char *) NULL)
{
GetPathComponent(option,TailPath,filename);
(void) FormatLocaleString(images->filename,MaxTextExtent,"%s%c%s",
path,*DirectorySeparator,filename);
}
if (format != (char *) NULL)
AppendImageFormat(format,images->filename);
AppendImageStack(images);
FinalizeImageSettings(image_info,image,MagickFalse);
if (global_colormap != MagickFalse)
{
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo(image_info);
(void) RemapImages(quantize_info,images,(Image *) NULL);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
*backup_filename='\0';
if ((LocaleCompare(image->filename,"-") != 0) &&
(IsPathWritable(image->filename) != MagickFalse))
{
register ssize_t
i;
/*
Rename image file as backup.
*/
(void) CopyMagickString(backup_filename,image->filename,
MaxTextExtent);
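          /*
            Probe image~, image~~, ... (up to six tildes) for an unused
            name; if none is free, or the rename fails, proceed without a
            backup.
          */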
for (i=0; i < 6; i++)
{
(void) ConcatenateMagickString(backup_filename,"~",MaxTextExtent);
if (IsPathAccessible(backup_filename) == MagickFalse)
break;
}
if ((IsPathAccessible(backup_filename) != MagickFalse) ||
(rename_utf8(image->filename,backup_filename) != 0))
*backup_filename='\0';
}
/*
Write transmogrified image to disk.
*/
image_info->synchronize=MagickTrue;
status&=WriteImages(image_info,image,image->filename,exception);
if (status != MagickFalse)
{
#if defined(MAGICKCORE_HAVE_UTIME)
{
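            /*
              When the preserve-timestamp image option is set, restore the
              original file's access and modification times on the rewritten
              file.
            */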
MagickBooleanType
preserve_timestamp;
preserve_timestamp=IsStringTrue(GetImageOption(image_info,
"preserve-timestamp"));
if (preserve_timestamp != MagickFalse)
{
struct utimbuf
timestamp;
timestamp.actime=properties.st_atime;
timestamp.modtime=properties.st_mtime;
                (void) utime(image->filename,&timestamp);
}
}
#endif
if (*backup_filename != '\0')
(void) remove_utf8(backup_filename);
}
RemoveAllImageStack();
continue;
}
pend=image != (Image *) NULL ? MagickTrue : MagickFalse;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickAlphaOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedAlphaChannelType",
argv[i]);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
i++;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
break;
if (LocaleCompare("append",option+1) == 0)
break;
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
break;
if (LocaleCompare("auto-level",option+1) == 0)
break;
if (LocaleCompare("auto-orient",option+1) == 0)
break;
if (LocaleCompare("average",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
break;
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("canny",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParseChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
break;
if (LocaleCompare("clip",option+1) == 0)
break;
if (LocaleCompare("clip-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("clut",option+1) == 0)
break;
if (LocaleCompare("coalesce",option+1) == 0)
break;
if (LocaleCompare("colorize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
if (*option == '-')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("compare",option+1) == 0)
break;
if (LocaleCompare("complex",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickComplexOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedComplexOperator",
argv[i]);
break;
}
if (LocaleCompare("composite",option+1) == 0)
break;
if (LocaleCompare("compress",option+1) == 0)
{
ssize_t
compress;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
compress=ParseCommandOption(MagickCompressOptions,MagickFalse,
argv[i]);
if (compress < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageCompression",
argv[i]);
break;
}
if (LocaleCompare("concurrent",option+1) == 0)
break;
if (LocaleCompare("connected-components",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
break;
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("deconstruct",option+1) == 0)
break;
if (LocaleCompare("debug",option+1) == 0)
{
ssize_t
event;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]);
if (event < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEventType",
argv[i]);
(void) SetLogEventMask(argv[i]);
break;
}
if (LocaleCompare("define",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
const char
*define;
define=GetImageOption(image_info,argv[i]);
if (define == (const char *) NULL)
ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]);
break;
}
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
break;
if (LocaleCompare("dft",option+1) == 0)
break;
if (LocaleCompare("direction",option+1) == 0)
{
ssize_t
direction;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
argv[i]);
if (direction < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDirectionType",
argv[i]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
ssize_t
dispose;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
argv[i]);
if (dispose < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod",
argv[i]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod",
argv[i]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("duration",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
ssize_t
endian;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]);
if (endian < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEndianType",
argv[i]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
break;
if (LocaleCompare("equalize",option+1) == 0)
break;
if (LocaleCompare("evaluate",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
ssize_t
filter;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]);
if (filter < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageFilter",
argv[i]);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
break;
if (LocaleCompare("flip",option+1) == 0)
break;
if (LocaleCompare("flop",option+1) == 0)
break;
if (LocaleCompare("floodfill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
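/*
  Rewrite the option text in place to the dummy option "-sans" so that
  later passes over the argument vector ignore it; the format itself is
  consumed and validated here.
*/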
(void) CopyMagickString(argv[i]+1,"sans",MaxTextExtent);
(void) CloneString(&format,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&format,argv[i]);
(void) CopyMagickString(image_info->filename,format,MaxTextExtent);
(void) ConcatenateMagickString(image_info->filename,":",
MaxTextExtent);
(void) SetImageInfo(image_info,0,exception);
if (*image_info->magick == '\0')
ThrowMogrifyException(OptionError,"UnrecognizedImageFormat",
format);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
ssize_t
gravity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
argv[i]);
if (gravity < 0)
ThrowMogrifyException(OptionError,"UnrecognizedGravityType",
argv[i]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod",
argv[i]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
break;
if (LocaleCompare("hough-lines",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("help",option+1) == 0) ||
(LocaleCompare("-help",option+1) == 0))
return(MogrifyUsage());
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
break;
if (LocaleCompare("idft",option+1) == 0)
break;
if (LocaleCompare("implode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("intensity",option+1) == 0)
{
ssize_t
intensity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,argv[i]);
if (intensity < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedPixelIntensityMethod",argv[i]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
ssize_t
intent;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]);
if (intent < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntentType",
argv[i]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
ssize_t
interlace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse,
argv[i]);
if (interlace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType",
argv[i]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
ssize_t
interpolate;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse,
argv[i]);
if (interpolate < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod",
argv[i]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("lat",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("layers",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod",
argv[i]);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("linewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
char
*p;
double
value;
ssize_t
resource;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
resource=ParseCommandOption(MagickResourceOptions,MagickFalse,
argv[i]);
if (resource < 0)
ThrowMogrifyException(OptionError,"UnrecognizedResourceType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
value=StringToDouble(argv[i],&p);
(void) value;
if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0))
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]);
if (list < 0)
ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]);
status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **)
argv+j,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
i++;
if ((i == (ssize_t) argc) ||
(strchr(argv[i],'%') == (char *) NULL))
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
break;
if (LocaleCompare("map",option+1) == 0)
{
global_colormap=(*option == '+') ? MagickTrue : MagickFalse;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("matte",option+1) == 0)
break;
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMetricType",
argv[i]);
break;
}
if (LocaleCompare("maximum",option+1) == 0)
break;
if (LocaleCompare("mean-shift",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("minimum",option+1) == 0)
break;
if (LocaleCompare("modulate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
break;
if (LocaleCompare("monochrome",option+1) == 0)
break;
if (LocaleCompare("morph",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MaxTextExtent];
KernelInfo
*kernel_info;
ssize_t
op;
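/*
  Validate in two steps: the morphology method is the first token of the
  argument, and the kernel string that follows must parse into a
  KernelInfo.
*/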
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
GetNextToken(argv[i],(const char **) NULL,MaxTextExtent,token);
op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod",
token);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
break;
if (LocaleCompare("motion-blur",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
break;
if (LocaleCompare("noise",option+1) == 0)
{
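/*
  +noise names a noise type to add, while -noise takes a geometry
  (radius) for noise reduction; each form is validated accordingly.
*/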
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
ssize_t
noise;
noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,argv[i]);
if (noise < 0)
ThrowMogrifyException(OptionError,"UnrecognizedNoiseType",
argv[i]);
break;
}
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("noop",option+1) == 0)
break;
if (LocaleCompare("normalize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("orient",option+1) == 0)
{
ssize_t
orientation;
orientation=UndefinedOrientation;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse,
argv[i]);
if (orientation < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("paint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("path",option+1) == 0)
{
(void) CloneString(&path,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&path,argv[i]);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("poly",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'r':
{
if (LocaleCompare("radial-blur",option+1) == 0 ||
LocaleCompare("rotational-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("regard-warnings",option+1) == 0)
break;
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("render",option+1) == 0)
break;
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleNCompare("respect-parentheses",option+1,17) == 0)
{
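/*
  Compare only 17 characters so that both spellings,
  "respect-parentheses" and "respect-parenthesis", are accepted.
*/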
respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("reverse",option+1) == 0)
break;
if (LocaleCompare("roll",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sampling-factor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("separate",option+1) == 0)
break;
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("smush",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStatisticType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
ssize_t
stretch;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,argv[i]);
if (stretch < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
break;
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
ssize_t
style;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]);
if (style < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
break;
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transform",option+1) == 0)
break;
if (LocaleCompare("transpose",option+1) == 0)
break;
if (LocaleCompare("transverse",option+1) == 0)
break;
if (LocaleCompare("threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
break;
if (LocaleCompare("type",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageType",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
break;
if (LocaleCompare("units",option+1) == 0)
{
ssize_t
units;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
argv[i]);
if (units < 0)
ThrowMogrifyException(OptionError,"UnrecognizedUnitsType",
argv[i]);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedVirtualPixelMethod",argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("write",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case '?':
break;
default:
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
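/*
  Options flagged with FireOptionFlag force the accumulated image stack
  to be processed before parsing continues.
*/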
fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) &
FireOptionFlag) == 0 ? MagickFalse : MagickTrue;
if (fire != MagickFalse)
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
}
if (k != 0)
ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]);
if (i != (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]);
DestroyMogrify();
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageInfo() applies image processing settings to the image info
% structure as prescribed by the command line options.
%
% The format of the MogrifyImageInfo method is:
%
% MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
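% For example, a minimal sketch (the settings array and its values are
% hypothetical; the option strings follow the usual command-line syntax):
%
% MagickBooleanType
% status;
%
% const char
% *settings[]={ "-quality", "85", "-colorspace", "sRGB" };
%
% status=MogrifyImageInfo(image_info,4,settings,exception);
%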
*/
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) CloneString(&image_info->authenticate,(char *) NULL);
else
(void) CloneString(&image_info->authenticate,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBackgroundColor,
&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->background_color,
exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBorderColor,
&image_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->border_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0);
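/*
  Apply the memory limit as given; the map (disk-backed pixel cache)
  limit is set to twice the memory limit.
*/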
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
image_info->channel=DefaultChannels;
break;
}
image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
image_info->colors=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
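/*
  Definitions prefixed with "registry:" are stored in the process-wide
  image registry; all others become image-info options.
*/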
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
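/*
  Format escapes that require the image pixels (for example %k, the
  unique color count) disable ping (header-only) reads.
*/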
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(MogrifyMatteColor,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->matte_color,
exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MaxTextExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
(void) ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/*
Preview image.
*/
if (*option == '+')
{
image_info->preview_type=UndefinedPreview;
break;
}
image_info->preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&image_info->transparent_color, exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->transparent_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
{
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
break;
}
(void) CloneString(&image_info->view,argv[i+1]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
image_info->virtual_pixel_method=UndefinedVirtualPixelMethod;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->virtual_pixel_method=(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
i+=count;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageList() applies any command line options that might affect the
% entire image list (e.g. -append, -coalesce, etc.).
%
% The format of the MogrifyImageList method is:
%
% MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc,
% const char **argv,Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o images: pointer to a pointer of the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
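/*
Illustrative only, compiled out: a minimal, hypothetical caller of
MogrifyImageList(). The filename "sequence.gif" and the single
"-coalesce" option are assumptions for this sketch, not part of the API.
*/
#if 0
static void MogrifyImageListExample(void)
{
const char
*example_argv[] = { "-coalesce" };
ExceptionInfo
*exception = AcquireExceptionInfo();
ImageInfo
*example_info = CloneImageInfo((ImageInfo *) NULL);
Image
*images;
(void) CopyMagickString(example_info->filename,"sequence.gif",MaxTextExtent);
images=ReadImage(example_info,exception);  /* load the multi-frame sequence */
if (images != (Image *) NULL)
{
(void) MogrifyImageList(example_info,1,example_argv,&images,exception);
images=DestroyImageList(images);
}
example_info=DestroyImageInfo(example_info);
exception=DestroyExceptionInfo(exception);
}
#endif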
WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
channel=mogrify_info->channel;
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
channel=DefaultChannels;
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) ClutImageChannel(image,channel,clut_image);
clut_image=DestroyImage(clut_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images);
combine_image=CombineImages(*images,channel,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
const char
*option;
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
metric=UndefinedMetric;
option=GetImageOption(image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImageChannels(image,reconstruct_image,
channel,metric,&distortion,exception);
if (difference_image == (Image *) NULL)
break;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
Image
*mask_image,
*composite_image,
*image;
RectangleInfo
geometry;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
composite_image=RemoveFirstImageFromList(images);
if (composite_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) TransformImage(&composite_image,(char *) NULL,
composite_image->geometry);
SetGeometry(composite_image,&geometry);
(void) ParseAbsoluteGeometry(composite_image->geometry,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,
&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image != (Image *) NULL)
{
if ((image->compose == DisplaceCompositeOp) ||
(image->compose == DistortCompositeOp))
{
/*
Merge Y displacement into X displacement image.
*/
(void) CompositeImage(composite_image,CopyGreenCompositeOp,
mask_image,0,0);
mask_image=DestroyImage(mask_image);
}
else
{
/*
Set a blending mask for the composition.
*/
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=mask_image;
(void) NegateImage(image->mask,MagickFalse);
}
}
(void) CompositeImageChannel(image,channel,image->compose,
composite_image,geometry.x,geometry.y);
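/* Release the blending mask; in this branch mask_image aliases image->mask. */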
if (mask_image != (Image *) NULL)
{
image->mask=DestroyImage(image->mask);
mask_image=image->mask;
}
composite_image=DestroyImage(composite_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images);
deconstruct_image=DeconstructImages(*images,exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
AppendImageToList(images,duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images);
fx_image=FxImageChannel(*images,channel,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
(void) HaldClutImageChannel(image,channel,hald_image);
hald_image=DestroyImage(hald_image);
InheritException(exception,&image->exception);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
Implements the inverse discrete Fourier transform (inverse DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
p=DestroyImage(p);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
ImageLayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images);
layers=(Image *) NULL;
method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImageLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
General Purpose, GIF Animation Optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
InheritException(exception,&(*images)->exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
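/*
Illustrative, hedged example: a command such as
convert base.gif null: overlay.gif -layers Composite result.gif
reaches this point, with the 'NULL:' marker separating the
destination list from the overlay source list.
*/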
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
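/*
Illustrative note: the string is parsed as "weight,exponent" pairs,
one pair per input image, so e.g. (hypothetically)
-poly "0.5,1.0 0.5,1.0" averages two images; number_arguments >> 1
below is the pair count passed to PolynomialImageChannel().
*/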
(void) SyncImageSettings(mogrify_info,*images);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
InheritException(exception,&(*images)->exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImageChannel(*images,channel,
number_arguments >> 1,arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
break;
}
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
if (string == (char *) NULL)
break;
InheritException(exception,&(*images)->exception);
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
if ((number_arguments > 1) && (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
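/*
Illustrative, hedged example: -process 'analyze="-verbose"' is
tokenized below into the module name ("analyze", hypothetical here)
and the quoted argument string handed to the filter.
*/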
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MaxTextExtent-1))
token=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=argv[i+1];
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&argv,
exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
InheritException(exception,&(*images)->exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MaxTextExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images);
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImages() applies image processing options to a sequence of images as
% prescribed by command line options.
%
% The format of the MogrifyImages method is:
%
% MagickBooleanType MogrifyImages(ImageInfo *image_info,
% const MagickBooleanType post,const int argc,const char **argv,
% Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o post: If true, post-process the image list operators; otherwise pre-process them.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o images: pointer to a pointer of the first image in image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
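/*
Usage note (illustrative): a single call runs the per-image operators once
per frame; the post flag selects whether the multi-image list operators
(handled by MogrifyImageList()) run before (post == MagickFalse) or after
(post != MagickFalse) that per-image pass.
*/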
WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info,
const MagickBooleanType post,const int argc,const char **argv,
Image **images,ExceptionInfo *exception)
{
#define MogrifyImageTag "Mogrify/Image"
MagickStatusType
status;
MagickBooleanType
proceed;
size_t
n;
register ssize_t
i;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (images == (Image **) NULL)
return(MogrifyImage(image_info,argc,argv,images,exception));
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
(void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL,
(void *) NULL);
status=MagickTrue;
#if 0
(void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc,
post?"post":"pre");
#endif
/*
Pre-process multi-image sequence operators
*/
if (post == MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
/*
For each image, process simple single image operators
*/
i=0;
n=GetImageListLength(*images);
for (;;)
{
#if 0
(void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
status&=MogrifyImage(image_info,argc,argv,images,exception);
proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i,n);
if (proceed == MagickFalse)
break;
if ((*images)->next == (Image *) NULL)
break;
*images=(*images)->next;
i++;
}
assert(*images != (Image *) NULL);
#if 0
(void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
/*
Post-process multi-image sequence operators
*/
*images=GetFirstImageInList(*images);
if (post != MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_947_0 |
crossvul-cpp_data_bad_4966_4 | /*
* NET3: Garbage Collector For AF_UNIX sockets
*
* Garbage Collector:
* Copyright (C) Barak A. Pearlmutter.
* Released under the GPL version 2 or later.
*
* Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
* If it doesn't work blame me, it worked when Barak sent it.
*
* Assumptions:
*
* - object w/ a bit
* - free list
*
* Current optimizations:
*
* - explicit stack instead of recursion
* - tail recurse on first born instead of immediate push/pop
* - we gather the stuff that should not be killed into tree
* and stack is just a path from root to the current pointer.
*
* Future optimizations:
*
* - don't just push entire root set; process in place
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Alan Cox 07 Sept 1997 Vmalloc internal stack as needed.
* Cope with changing max_files.
* Al Viro 11 Oct 1998
* Graph may have cycles. That is, we can send the descriptor
* of foo to bar and vice versa. Current code chokes on that.
* Fix: move SCM_RIGHTS ones into the separate list and then
* skb_free() them all instead of doing explicit fput's.
* Another problem: since fput() may block somebody may
* create a new unix_socket when we are in the middle of sweep
* phase. Fix: revert the logic wrt MARKED. Mark everything
* upon the beginning and unmark non-junk ones.
*
* [12 Oct 1998] AAARGH! New code purges all SCM_RIGHTS
* sent to connect()'ed but still not accept()'ed sockets.
* Fixed. Old code had slightly different problem here:
* extra fput() in situation when we passed the descriptor via
* such socket and closed it (descriptor). That would happen on
* each unix_gc() until the accept(). Since the struct file in
* question would go to the free list and might be reused...
* That might be the reason of random oopses on filp_close()
* in unrelated processes.
*
* AV 28 Feb 1999
* Kill the explicit allocation of stack. Now we keep the tree
* with root in dummy + pointer (gc_current) to one of the nodes.
* Stack is represented as path from gc_current to dummy. Unmark
* now means "add to tree". Push == "make it a son of gc_current".
* Pop == "move gc_current to parent". We keep only pointers to
* parents (->gc_tree).
* AV 1 Mar 1999
* Damn. Added missing check for ->dead in listen queues scanning.
*
* Miklos Szeredi 25 Jun 2007
* Reimplement with a cycle collecting algorithm. This should
* solve several problems with the previous code, like being racy
* wrt receive and holding up unrelated socket operations.
*/
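/*
 * Illustrative only, compiled out: a hypothetical userspace sketch of how
 * an in-flight reference cycle is created -- the situation the cycle
 * collector below exists to clean up. sv[0]'s own fd is queued on sv[0]'s
 * receive queue via SCM_RIGHTS, so after both fds are closed the only
 * remaining reference to sv[0]'s file is the skb on its own queue.
 */
#if 0
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <unistd.h>
static void make_unix_cycle(void)
{
int sv[2];
char dummy = 'x';
struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
union {
struct cmsghdr hdr;
char buf[CMSG_SPACE(sizeof(int))];
} u;
struct msghdr msg = {
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = u.buf,
.msg_controllen = sizeof(u.buf),
};
struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
socketpair(AF_UNIX, SOCK_DGRAM, 0, sv);
cmsg->cmsg_level = SOL_SOCKET;
cmsg->cmsg_type = SCM_RIGHTS;
cmsg->cmsg_len = CMSG_LEN(sizeof(int));
memcpy(CMSG_DATA(cmsg), &sv[0], sizeof(int));
sendmsg(sv[1], &msg, 0); /* queues sv[0]'s fd on sv[0]'s own queue */
close(sv[0]); /* only the in-flight reference remains */
close(sv[1]); /* now nothing can ever receive it */
}
#endif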
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>
/* Internal data structures and random procedures: */
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = file_inode(filp);
/* Socket ? */
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
struct sock *s = sock->sk;
/* PF_UNIX ? */
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
}
return u_sock;
}
/* Keep the number of times in flight count for the file
* descriptor if it is for an AF_UNIX socket.
*/
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
spin_lock(&unix_gc_lock);
if (s) {
struct unix_sock *u = unix_sk(s);
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
} else {
BUG_ON(list_empty(&u->link));
}
unix_tot_inflight++;
}
fp->f_cred->user->unix_inflight++;
spin_unlock(&unix_gc_lock);
}
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
spin_lock(&unix_gc_lock);
if (s) {
struct unix_sock *u = unix_sk(s);
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
}
fp->f_cred->user->unix_inflight--;
spin_unlock(&unix_gc_lock);
}
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
struct sk_buff *skb;
struct sk_buff *next;
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
/* Do we have file descriptors ? */
if (UNIXCB(skb).fp) {
bool hit = false;
/* Process the descriptors of this socket */
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
while (nfd--) {
/* Get the socket the fd matches if it indeed does so */
struct sock *sk = unix_get_socket(*fp++);
if (sk) {
struct unix_sock *u = unix_sk(sk);
/* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
hit = true;
func(u);
}
}
}
if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb);
}
}
}
spin_unlock(&x->sk_receive_queue.lock);
}
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
if (x->sk_state != TCP_LISTEN) {
scan_inflight(x, func, hitlist);
} else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
/* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
/* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &embryos);
}
spin_unlock(&x->sk_receive_queue.lock);
while (!list_empty(&embryos)) {
u = list_entry(embryos.next, struct unix_sock, link);
scan_inflight(&u->sk, func, hitlist);
list_del_init(&u->link);
}
}
}
static void dec_inflight(struct unix_sock *usk)
{
atomic_long_dec(&usk->inflight);
}
static void inc_inflight(struct unix_sock *usk)
{
atomic_long_inc(&usk->inflight);
}
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
/* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
list_move_tail(&u->link, &gc_candidates);
}
static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void)
{
/* If number of inflight sockets is insane,
* force a garbage collect right now.
*/
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{
struct unix_sock *u;
struct unix_sock *next;
struct sk_buff_head hitlist;
struct list_head cursor;
LIST_HEAD(not_cycle_list);
spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
gc_in_progress = true;
/* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
* Holding unix_gc_lock will protect these candidates from
* being detached, and hence from gaining an external
* reference. Since there are no possible receivers, all
* buffers currently on the candidates' queues stay there
* during the garbage collection.
*
* We also know that no new candidate can be added onto the
* receive queues. Other, non candidate sockets _can_ be
* added to queue, so we must make sure only to touch
* candidates.
*/
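/* Worked example (illustrative): a socket whose only remaining references
 * are the copies of its own fd queued in SCM_RIGHTS messages has
 * total_refs == inflight_refs, is unreachable from userspace, and so
 * becomes a candidate below.
 */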
list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1);
BUG_ON(total_refs < inflight_refs);
if (total_refs == inflight_refs) {
list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
}
}
/* Now remove all internal in-flight reference to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
/* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
* Use a "cursor" link, to make the list traversal safe, even
* though elements might be moved about.
*/
list_add(&cursor, &gc_candidates);
while (cursor.next != &gc_candidates) {
u = list_entry(cursor.next, struct unix_sock, link);
/* Move cursor to after the current position. */
list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) {
list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL);
}
}
list_del(&cursor);
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(&not_cycle_list)) {
u = list_entry(not_cycle_list.next, struct unix_sock, link);
__clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
list_move_tail(&u->link, &gc_inflight_list);
}
/* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
skb_queue_head_init(&hitlist);
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, inc_inflight, &hitlist);
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
__skb_queue_purge(&hitlist);
spin_lock(&unix_gc_lock);
/* All candidates should have been detached by now. */
BUG_ON(!list_empty(&gc_candidates));
gc_in_progress = false;
wake_up(&unix_gc_wait);
out:
spin_unlock(&unix_gc_lock);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4966_4 |
crossvul-cpp_data_good_1855_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
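/*
Note: each FOURCC is the four ASCII tag bytes read as a little-endian
32-bit word, e.g. "DXT1" = 'D'(0x44) 'X'(0x58) 'T'(0x54) '1'(0x31)
stored as 0x31545844.
*/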
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
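/*
Note (an assumption based on the libsquish-style tables that follow):
each DDSSingleColourLookup entry maps one 8-bit channel value to the
best pair of quantized endpoints (5-bit for DDSLookup_5_4, 6-bit for
DDSLookup_6_4) plus the residual error, one source per interpolation
mode used when compressing a single-colour block.
*/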
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if (max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if (max - min < steps) \
    min = MagickMax(min - steps, 0); \
} while(0)
#define Dot(left, right) ((left.x*right.x) + (left.y*right.y) + (left.z*right.z))
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *),
WriteMipmaps(Image *,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
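/*
Expand the two RGB565 base colors of a DXT block into the four-entry
palette: when ignoreAlpha is set or c0 > c1 the block uses four opaque
colors interpolated at 1/3 and 2/3; otherwise the third color is the
midpoint and the fourth entry is transparent black.
*/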
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
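/*
Quantize sixteen alpha values against an eight-entry codebook derived
from min/max (interpolated steps plus, in 5-step mode, explicit 0 and
255); stores the chosen indices and returns the summed squared error.
*/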
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
ssize_t
diff;
size_t
dist;
/* squared distance between this alpha value and codebook entry j */
diff = (ssize_t) value - (ssize_t) codes[j];
dist = (size_t) (diff*diff);
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
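/*
Cluster fit: walk every way of splitting the colors, ordered along the
principal axis, into the four palette slots; solve a least-squares fit
for the block endpoints of each split and keep the lowest-error result.
Repeats for up to eight refined orderings.
*/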
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
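/*
Range fit: project each color onto the principal axis, take the
extremes as endpoints, snap them to RGB565 precision, and assign every
color to the nearest of the four derived palette entries.
*/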
static void CompressRangeFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
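/*
Single-color fit: use the precomputed lookup tables to find the RGB565
endpoint pair whose interpolation best reproduces each 8-bit component.
*/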
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
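/*
Estimate the dominant eigenvector of the symmetric 3x3 covariance
matrix with a fixed eight-step power iteration, rescaling by the
largest component after each multiply.
*/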
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
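/*
Accumulate the weighted covariance of the colors about their weighted
centroid; only the six unique entries of the symmetric matrix are kept.
*/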
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
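/*
Sort the colors by their projection onto the given axis (insertion
sort) and precompute the weighted points and their sum; returns
MagickFalse when the ordering repeats an earlier iteration, which
terminates the cluster-fit refinement loop.
*/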
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status,
cubemap = MagickFalse,
volume = MagickFalse,
matte;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue) {
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if ((dds_info.ddscaps2 & DDSCAPS2_VOLUME) && (dds_info.depth > 0))
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
matte = MagickTrue;
decoder = ReadUncompressedRGBA;
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
matte = MagickFalse;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
matte = MagickTrue;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
matte = MagickTrue;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/* Start a new image */
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->matte = matte;
image->compression = compression;
image->columns = dds_info.width;
image->rows = dds_info.height;
image->storage_class = DirectClass;
image->endian = LSBEndian;
image->depth = 8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((decoder)(image, &dds_info, exception) != MagickTrue)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
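/*
Decode DXT1: each 4x4 block holds two RGB565 base colors and sixteen
2-bit palette indices; a transparent palette entry promotes the image
to matte on the fly.
*/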
static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
PixelPacket
*q;
register ssize_t
i,
x;
size_t
bits;
ssize_t
j,
y;
unsigned char
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
if (colors.a[code] && image->matte == MagickFalse)
/* Correct matte */
image->matte = MagickTrue;
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
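/*
Decode DXT3: each 4x4 block holds 64 bits of explicit 4-bit alpha
followed by a DXT1-style color block, always treated as four-color.
*/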
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
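/*
Decode DXT5: each 4x4 block holds two 8-bit alpha endpoints and 48
bits of 3-bit alpha codes, followed by a DXT1-style color block.
*/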
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
MagickSizeType
alpha_bits;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract 3-bit alpha code: 0 and 1 select a0/a1 directly; the rest
interpolate, in 8-alpha mode when a0 > a1, otherwise in 6-alpha
mode with explicit 0 and 255.
*/
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
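/*
Decode uncompressed pixels without alpha: 8-bit grayscale, RGB565, or
24/32-bit BGR(X).
*/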
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
x, y;
unsigned short
color;
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
SetPixelGray(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)));
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
SetPixelAlpha(q,QuantumRange);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
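/*
Decode uncompressed pixels with alpha: ARGB1555, 8:8 luminance-alpha,
ARGB4444, or 32-bit BGRA, selected by the pixel-format bit masks.
*/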
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleMatteType);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else if (alphaBits == 2)
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(color >> 8)));
SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
}
else
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)));
}
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
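/*
Translate per-unique-color palette indices back into raster order;
pixels flagged -1 in the map (transparent under DXT1) get index 3.
*/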
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
register ssize_t
i;
MagickOffsetType
offset;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) &&
((dds_info->ddscaps1 & DDSCAPS_TEXTURE) ||
(dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if ((dds_info->ddscaps1 & DDSCAPS_MIPMAP) &&
((dds_info->ddscaps1 & DDSCAPS_TEXTURE) ||
(dds_info->ddscaps2 & DDSCAPS2_CUBEMAP)))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) w * h * pixel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
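/*
Emit the 8-byte DXT5 alpha block: compress against both the 5-step and
7-step codebooks, keep whichever has the lower error (remapping the
7-step indices and swapping the endpoints as needed), then pack the
sixteen 3-bit indices into six bytes.
*/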
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
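/*
Compress one block of unique colors: derive the principal axis from
the weighted covariance, then fit endpoints with the fast range fit or
the higher-quality cluster fit.
*/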
static void WriteCompressed(Image *image, const size_t count,
DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if (clusterFit == MagickFalse || count == 0)
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT1,
% DXT5, or uncompressed RGB(A) format, depending on the image matte
% channel and the dds:compression option.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (!image->matte)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
weightByAlpha=MagickTrue;
}
}
}
maxMipmaps=SIZE_MAX;
mipmaps=0;
if ((image->columns & (image->columns - 1)) == 0 &&
(image->rows & (image->rows - 1)) == 0)
{
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while (columns != 1 && rows != 1 && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
&image->exception);
if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
clusterFit,weightByAlpha,&image->exception) == MagickFalse)
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT | DDSD_LINEARSIZE);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 8));
else
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 16));
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) ResetMagickMemory(software,0,sizeof(software));
(void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
for(i=0;i < 5;i++) /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->matte)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
for(i=0;i < 4;i++) /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
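/*
Compress the image 4x4 blocks at a time: gather each block's unique
colors (optionally weighted by alpha), track the alpha extremes for
DXT5, and hand the block to the single-color, range, or cluster fitter.
*/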
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
          alphas[4*by + bx] = (ssize_t) alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
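/*
 * Note on the weighting above (a sketch of the intent, not normative): with
 * weightByAlpha enabled a fully opaque texel contributes weight
 * (255+1)/256 == 1.0 and a fully transparent one only 1/256; duplicate
 * colors accumulate their weights before the sqrt() pass, so the cluster
 * fit is biased toward visible, frequently occurring colors.
 */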
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression, const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value,
const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);
  if (result < 0)
    return(0);
  if (result > (ssize_t) limit)
    return(limit);
  return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
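/*
 * Worked example: ColorTo565() quantizes a unit-range RGB point to the
 * packed 5:6:5 endpoint format used by the DXT blocks, e.g. pure white
 * {1.0f,1.0f,1.0f} maps to (31 << 11) | (63 << 5) | 31 == 0xFFFF.
 */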
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char* indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
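/*
 * Sketch of the remapping above: DXT1 selects four-color mode only when the
 * first 16-bit endpoint compares greater than the second, so when a < b the
 * endpoints are swapped and bit 0 of each 2-bit index is flipped
 * (0<->1, 2<->3), leaving every texel pointing at the same palette entry.
 */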
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
Image*
resize_image;
register ssize_t
i;
size_t
columns,
rows;
columns = image->columns;
rows = image->rows;
for (i=0; i< (ssize_t) mipmaps; i++)
{
resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter,1.0,
exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
DestroyBlob(resize_image);
resize_image->blob=ReferenceBlob(image->blob);
    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
resize_image=DestroyImage(resize_image);
columns = DIV2(columns);
rows = DIV2(rows);
}
return(MagickTrue);
}
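/*
 * Design note (inferred from the code above): each mipmap level is resized
 * into a scratch image whose blob is replaced by a reference to the parent
 * image's blob, so the level's blocks are appended to the same output
 * stream before the scratch image is destroyed.
 */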
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
const ssize_t* map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
if (image->matte)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p)));
p++;
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_1855_0 |
crossvul-cpp_data_good_3640_0 | /*
* hugetlbpage-backed filesystem. Based on ramfs.
*
* William Irwin, 2002
*
* Copyright (C) 2002 Linus Torvalds.
*/
#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h> /* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <asm/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
uid_t uid;
gid_t gid;
umode_t mode;
long nr_blocks;
long nr_inodes;
struct hstate *hstate;
};
struct hugetlbfs_inode_info {
struct shared_policy policy;
struct inode vfs_inode;
};
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
static struct backing_dev_info hugetlbfs_backing_dev_info = {
.name = "hugetlbfs",
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
int sysctl_hugetlb_shm_group;
enum {
Opt_size, Opt_nr_inodes,
Opt_mode, Opt_uid, Opt_gid,
Opt_pagesize,
Opt_err,
};
static const match_table_t tokens = {
{Opt_size, "size=%s"},
{Opt_nr_inodes, "nr_inodes=%s"},
{Opt_mode, "mode=%o"},
{Opt_uid, "uid=%u"},
{Opt_gid, "gid=%u"},
{Opt_pagesize, "pagesize=%s"},
{Opt_err, NULL},
};
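/*
 * Example (illustrative): the option table above corresponds to a mount
 * invocation such as
 *
 *   mount -t hugetlbfs -o size=512M,nr_inodes=64,mode=0700,pagesize=2M \
 *         none /mnt/huge
 */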
static void huge_pagevec_release(struct pagevec *pvec)
{
int i;
for (i = 0; i < pagevec_count(pvec); ++i)
put_page(pvec->pages[i]);
pagevec_reinit(pvec);
}
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
struct inode *inode = file->f_path.dentry->d_inode;
loff_t len, vma_len;
int ret;
struct hstate *h = hstate_file(file);
/*
* vma address alignment (but not the pgoff alignment) has
* already been checked by prepare_hugepage_range. If you add
* any error returns here, do so after setting VM_HUGETLB, so
* is_vm_hugetlb_page tests below unmap_region go the right
* way when do_mmap_pgoff unwinds (may be important on powerpc
* and ia64).
*/
vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
vma->vm_ops = &hugetlb_vm_ops;
if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
return -EINVAL;
vma_len = (loff_t)(vma->vm_end - vma->vm_start);
mutex_lock(&inode->i_mutex);
file_accessed(file);
ret = -ENOMEM;
len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
if (hugetlb_reserve_pages(inode,
vma->vm_pgoff >> huge_page_order(h),
len >> huge_page_shift(h), vma,
vma->vm_flags))
goto out;
ret = 0;
hugetlb_prefault_arch_hook(vma->vm_mm);
if (vma->vm_flags & VM_WRITE && inode->i_size < len)
inode->i_size = len;
out:
mutex_unlock(&inode->i_mutex);
return ret;
}
/*
* Called under down_write(mmap_sem).
*/
#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long start_addr;
struct hstate *h = hstate_file(file);
if (len & ~huge_page_mask(h))
return -EINVAL;
if (len > TASK_SIZE)
return -ENOMEM;
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
return addr;
}
if (addr) {
addr = ALIGN(addr, huge_page_size(h));
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
(!vma || addr + len <= vma->vm_start))
return addr;
}
if (len > mm->cached_hole_size)
start_addr = mm->free_area_cache;
else {
start_addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
}
full_search:
addr = ALIGN(start_addr, huge_page_size(h));
for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
/* At this point: (!vma || addr < vma->vm_end). */
if (TASK_SIZE - len < addr) {
/*
* Start a new search - just in case we missed
* some holes.
*/
if (start_addr != TASK_UNMAPPED_BASE) {
start_addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
goto full_search;
}
return -ENOMEM;
}
if (!vma || addr + len <= vma->vm_start) {
mm->free_area_cache = addr + len;
return addr;
}
if (addr + mm->cached_hole_size < vma->vm_start)
mm->cached_hole_size = vma->vm_start - addr;
addr = ALIGN(vma->vm_end, huge_page_size(h));
}
}
#endif
static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
char __user *buf, unsigned long count,
unsigned long size)
{
char *kaddr;
unsigned long left, copied = 0;
int i, chunksize;
if (size > count)
size = count;
	/* Find which 4k chunk and offset within that chunk */
i = offset >> PAGE_CACHE_SHIFT;
offset = offset & ~PAGE_CACHE_MASK;
while (size) {
chunksize = PAGE_CACHE_SIZE;
if (offset)
chunksize -= offset;
if (chunksize > size)
chunksize = size;
kaddr = kmap(&page[i]);
left = __copy_to_user(buf, kaddr + offset, chunksize);
kunmap(&page[i]);
if (left) {
copied += (chunksize - left);
break;
}
offset = 0;
size -= chunksize;
buf += chunksize;
copied += chunksize;
i++;
}
return copied ? copied : -EFAULT;
}
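/*
 * Worked example (assuming 4k base pages, i.e. PAGE_CACHE_SIZE == 4096):
 * an offset of 10000 bytes into a huge page lands in chunk i == 2
 * (10000 >> 12) at intra-chunk offset 10000 & 4095 == 1808; the loop then
 * kmap()s each 4k subpage of the compound page in turn and copies
 * chunk by chunk.
 */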
/*
* Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(); we can't use that
* since it has PAGE_CACHE_SIZE assumptions.
*/
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
size_t len, loff_t *ppos)
{
struct hstate *h = hstate_file(filp);
struct address_space *mapping = filp->f_mapping;
struct inode *inode = mapping->host;
unsigned long index = *ppos >> huge_page_shift(h);
unsigned long offset = *ppos & ~huge_page_mask(h);
unsigned long end_index;
loff_t isize;
ssize_t retval = 0;
/* validate length */
if (len == 0)
goto out;
for (;;) {
struct page *page;
unsigned long nr, ret;
int ra;
/* nr is the maximum number of bytes to copy from this page */
nr = huge_page_size(h);
isize = i_size_read(inode);
if (!isize)
goto out;
end_index = (isize - 1) >> huge_page_shift(h);
if (index >= end_index) {
if (index > end_index)
goto out;
nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
if (nr <= offset)
goto out;
}
nr = nr - offset;
/* Find the page */
page = find_lock_page(mapping, index);
if (unlikely(page == NULL)) {
/*
* We have a HOLE, zero out the user-buffer for the
* length of the hole or request.
*/
ret = len < nr ? len : nr;
if (clear_user(buf, ret))
ra = -EFAULT;
else
ra = 0;
} else {
unlock_page(page);
/*
* We have the page, copy it to user space buffer.
*/
ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
ret = ra;
page_cache_release(page);
}
if (ra < 0) {
if (retval == 0)
retval = ra;
goto out;
}
offset += ret;
retval += ret;
len -= ret;
index += offset >> huge_page_shift(h);
offset &= ~huge_page_mask(h);
/* short read or no more work */
if ((ret != nr) || (len == 0))
break;
}
out:
*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
return retval;
}
static int hugetlbfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
return -EINVAL;
}
static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
BUG();
return -EINVAL;
}
static void truncate_huge_page(struct page *page)
{
cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
ClearPageUptodate(page);
delete_from_page_cache(page);
}
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
struct hstate *h = hstate_inode(inode);
struct address_space *mapping = &inode->i_data;
const pgoff_t start = lstart >> huge_page_shift(h);
struct pagevec pvec;
pgoff_t next;
int i, freed = 0;
pagevec_init(&pvec, 0);
next = start;
while (1) {
if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
if (next == start)
break;
next = start;
continue;
}
for (i = 0; i < pagevec_count(&pvec); ++i) {
struct page *page = pvec.pages[i];
lock_page(page);
if (page->index > next)
next = page->index;
++next;
truncate_huge_page(page);
unlock_page(page);
freed++;
}
huge_pagevec_release(&pvec);
}
BUG_ON(!lstart && mapping->nrpages);
hugetlb_unreserve_pages(inode, start, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
truncate_hugepages(inode, 0);
end_writeback(inode);
}
static inline void
hugetlb_vmtruncate_list(struct prio_tree_root *root, pgoff_t pgoff)
{
struct vm_area_struct *vma;
struct prio_tree_iter iter;
vma_prio_tree_foreach(vma, &iter, root, pgoff, ULONG_MAX) {
unsigned long v_offset;
/*
* Can the expression below overflow on 32-bit arches?
* No, because the prio_tree returns us only those vmas
* which overlap the truncated area starting at pgoff,
* and no vma on a 32-bit arch can span beyond the 4GB.
*/
if (vma->vm_pgoff < pgoff)
v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
else
v_offset = 0;
__unmap_hugepage_range(vma,
vma->vm_start + v_offset, vma->vm_end, NULL);
}
}
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
pgoff_t pgoff;
struct address_space *mapping = inode->i_mapping;
struct hstate *h = hstate_inode(inode);
BUG_ON(offset & ~huge_page_mask(h));
pgoff = offset >> PAGE_SHIFT;
i_size_write(inode, offset);
mutex_lock(&mapping->i_mmap_mutex);
if (!prio_tree_empty(&mapping->i_mmap))
hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
mutex_unlock(&mapping->i_mmap_mutex);
truncate_hugepages(inode, offset);
return 0;
}
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
struct hstate *h = hstate_inode(inode);
int error;
unsigned int ia_valid = attr->ia_valid;
BUG_ON(!inode);
error = inode_change_ok(inode, attr);
if (error)
return error;
if (ia_valid & ATTR_SIZE) {
error = -EINVAL;
if (attr->ia_size & ~huge_page_mask(h))
return -EINVAL;
error = hugetlb_vmtruncate(inode, attr->ia_size);
if (error)
return error;
}
setattr_copy(inode, attr);
mark_inode_dirty(inode);
return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
struct hugetlbfs_config *config)
{
struct inode *inode;
inode = new_inode(sb);
if (inode) {
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode->i_mode = S_IFDIR | config->mode;
inode->i_uid = config->uid;
inode->i_gid = config->gid;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
info = HUGETLBFS_I(inode);
mpol_shared_policy_init(&info->policy, NULL);
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
}
return inode;
}
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
umode_t mode, dev_t dev)
{
struct inode *inode;
inode = new_inode(sb);
if (inode) {
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
INIT_LIST_HEAD(&inode->i_mapping->private_list);
info = HUGETLBFS_I(inode);
/*
* The policy is initialized here even if we are creating a
* private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later when we
* call mpol_free_shared_policy() it will just return because
* the rb tree will still be empty.
*/
mpol_shared_policy_init(&info->policy, NULL);
switch (mode & S_IFMT) {
default:
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_op = &hugetlbfs_inode_operations;
inode->i_fop = &hugetlbfs_file_operations;
break;
case S_IFDIR:
inode->i_op = &hugetlbfs_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
/* directory inodes start off with i_nlink == 2 (for "." entry) */
inc_nlink(inode);
break;
case S_IFLNK:
inode->i_op = &page_symlink_inode_operations;
break;
}
lockdep_annotate_inode_mutex_key(inode);
}
return inode;
}
/*
* File creation. Allocate an inode, and we're done..
*/
static int hugetlbfs_mknod(struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOSPC;
inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
if (inode) {
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
error = 0;
}
return error;
}
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
if (!retval)
inc_nlink(dir);
return retval;
}
static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_symlink(struct inode *dir,
struct dentry *dentry, const char *symname)
{
struct inode *inode;
int error = -ENOSPC;
inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
if (inode) {
int l = strlen(symname)+1;
error = page_symlink(inode, symname, l);
if (!error) {
d_instantiate(dentry, inode);
dget(dentry);
} else
iput(inode);
}
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
return error;
}
/*
* mark the head page dirty
*/
static int hugetlbfs_set_page_dirty(struct page *page)
{
struct page *head = compound_head(page);
SetPageDirty(head);
return 0;
}
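/*
 * Sketch: dirtying any 4k subpage is redirected to the compound head, so a
 * single PG_dirty bit on the head page stands for the whole huge page and
 * tail pages are never marked individually.
 */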
static int hugetlbfs_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
int rc;
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc)
return rc;
migrate_page_copy(newpage, page);
return 0;
}
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
struct hstate *h = hstate_inode(dentry->d_inode);
buf->f_type = HUGETLBFS_MAGIC;
buf->f_bsize = huge_page_size(h);
if (sbinfo) {
spin_lock(&sbinfo->stat_lock);
/* If no limits set, just report 0 for max/free/used
* blocks, like simple_statfs() */
if (sbinfo->spool) {
long free_pages;
spin_lock(&sbinfo->spool->lock);
buf->f_blocks = sbinfo->spool->max_hpages;
free_pages = sbinfo->spool->max_hpages
- sbinfo->spool->used_hpages;
buf->f_bavail = buf->f_bfree = free_pages;
spin_unlock(&sbinfo->spool->lock);
buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes;
}
spin_unlock(&sbinfo->stat_lock);
}
buf->f_namelen = NAME_MAX;
return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
if (sbi) {
sb->s_fs_info = NULL;
if (sbi->spool)
hugepage_put_subpool(sbi->spool);
kfree(sbi);
}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
if (sbinfo->free_inodes >= 0) {
spin_lock(&sbinfo->stat_lock);
if (unlikely(!sbinfo->free_inodes)) {
spin_unlock(&sbinfo->stat_lock);
return 0;
}
sbinfo->free_inodes--;
spin_unlock(&sbinfo->stat_lock);
}
return 1;
}
static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
if (sbinfo->free_inodes >= 0) {
spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock);
}
}
static struct kmem_cache *hugetlbfs_inode_cachep;
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
struct hugetlbfs_inode_info *p;
if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
return NULL;
p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
if (unlikely(!p)) {
hugetlbfs_inc_free_inodes(sbinfo);
return NULL;
}
return &p->vfs_inode;
}
static void hugetlbfs_i_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}
static void hugetlbfs_destroy_inode(struct inode *inode)
{
hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}
static const struct address_space_operations hugetlbfs_aops = {
.write_begin = hugetlbfs_write_begin,
.write_end = hugetlbfs_write_end,
.set_page_dirty = hugetlbfs_set_page_dirty,
.migratepage = hugetlbfs_migrate_page,
};
static void init_once(void *foo)
{
struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
.read = hugetlbfs_read,
.mmap = hugetlbfs_file_mmap,
.fsync = noop_fsync,
.get_unmapped_area = hugetlb_get_unmapped_area,
.llseek = default_llseek,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
.create = hugetlbfs_create,
.lookup = simple_lookup,
.link = simple_link,
.unlink = simple_unlink,
.symlink = hugetlbfs_symlink,
.mkdir = hugetlbfs_mkdir,
.rmdir = simple_rmdir,
.mknod = hugetlbfs_mknod,
.rename = simple_rename,
.setattr = hugetlbfs_setattr,
};
static const struct inode_operations hugetlbfs_inode_operations = {
.setattr = hugetlbfs_setattr,
};
static const struct super_operations hugetlbfs_ops = {
.alloc_inode = hugetlbfs_alloc_inode,
.destroy_inode = hugetlbfs_destroy_inode,
.evict_inode = hugetlbfs_evict_inode,
.statfs = hugetlbfs_statfs,
.put_super = hugetlbfs_put_super,
.show_options = generic_show_options,
};
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
char *p, *rest;
substring_t args[MAX_OPT_ARGS];
int option;
unsigned long long size = 0;
enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
if (!options)
return 0;
while ((p = strsep(&options, ",")) != NULL) {
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
goto bad_val;
pconfig->uid = option;
break;
case Opt_gid:
if (match_int(&args[0], &option))
goto bad_val;
pconfig->gid = option;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
goto bad_val;
pconfig->mode = option & 01777U;
break;
case Opt_size: {
/* memparse() will accept a K/M/G without a digit */
if (!isdigit(*args[0].from))
goto bad_val;
size = memparse(args[0].from, &rest);
setsize = SIZE_STD;
if (*rest == '%')
setsize = SIZE_PERCENT;
break;
}
case Opt_nr_inodes:
/* memparse() will accept a K/M/G without a digit */
if (!isdigit(*args[0].from))
goto bad_val;
pconfig->nr_inodes = memparse(args[0].from, &rest);
break;
case Opt_pagesize: {
unsigned long ps;
ps = memparse(args[0].from, &rest);
pconfig->hstate = size_to_hstate(ps);
if (!pconfig->hstate) {
printk(KERN_ERR
"hugetlbfs: Unsupported page size %lu MB\n",
ps >> 20);
return -EINVAL;
}
break;
}
default:
printk(KERN_ERR "hugetlbfs: Bad mount option: \"%s\"\n",
p);
return -EINVAL;
}
}
/* Do size after hstate is set up */
if (setsize > NO_SIZE) {
struct hstate *h = pconfig->hstate;
if (setsize == SIZE_PERCENT) {
size <<= huge_page_shift(h);
size *= h->max_huge_pages;
do_div(size, 100);
}
pconfig->nr_blocks = (size >> huge_page_shift(h));
}
return 0;
bad_val:
printk(KERN_ERR "hugetlbfs: Bad value '%s' for mount option '%s'\n",
args[0].from, p);
return -EINVAL;
}
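/*
 * Worked example of the SIZE_PERCENT math above (assuming 2MB huge pages,
 * huge_page_shift() == 21, and h->max_huge_pages == 1024): "size=50%"
 * parses to size == 50, which becomes (50 << 21) * 1024 / 100, so
 * nr_blocks == 512 huge pages after the final shift.
 */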
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode * inode;
struct dentry * root;
int ret;
struct hugetlbfs_config config;
struct hugetlbfs_sb_info *sbinfo;
save_mount_options(sb, data);
config.nr_blocks = -1; /* No limit on size by default */
config.nr_inodes = -1; /* No limit on number of inodes by default */
config.uid = current_fsuid();
config.gid = current_fsgid();
config.mode = 0755;
config.hstate = &default_hstate;
ret = hugetlbfs_parse_options(data, &config);
if (ret)
return ret;
sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
sb->s_fs_info = sbinfo;
sbinfo->hstate = config.hstate;
spin_lock_init(&sbinfo->stat_lock);
sbinfo->max_inodes = config.nr_inodes;
sbinfo->free_inodes = config.nr_inodes;
sbinfo->spool = NULL;
if (config.nr_blocks != -1) {
sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
if (!sbinfo->spool)
goto out_free;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = huge_page_size(config.hstate);
sb->s_blocksize_bits = huge_page_shift(config.hstate);
sb->s_magic = HUGETLBFS_MAGIC;
sb->s_op = &hugetlbfs_ops;
sb->s_time_gran = 1;
inode = hugetlbfs_get_root(sb, &config);
if (!inode)
goto out_free;
root = d_alloc_root(inode);
if (!root) {
iput(inode);
goto out_free;
}
sb->s_root = root;
return 0;
out_free:
if (sbinfo->spool)
kfree(sbinfo->spool);
kfree(sbinfo);
return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}
static struct file_system_type hugetlbfs_fs_type = {
.name = "hugetlbfs",
.mount = hugetlbfs_mount,
.kill_sb = kill_litter_super,
};
static struct vfsmount *hugetlbfs_vfsmount;
static int can_do_hugetlb_shm(void)
{
return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group);
}
struct file *hugetlb_file_setup(const char *name, size_t size,
vm_flags_t acctflag,
struct user_struct **user, int creat_flags)
{
int error = -ENOMEM;
struct file *file;
struct inode *inode;
struct path path;
struct dentry *root;
struct qstr quick_string;
*user = NULL;
if (!hugetlbfs_vfsmount)
return ERR_PTR(-ENOENT);
if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
*user = current_user();
if (user_shm_lock(size, *user)) {
printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
} else {
*user = NULL;
return ERR_PTR(-EPERM);
}
}
root = hugetlbfs_vfsmount->mnt_root;
quick_string.name = name;
quick_string.len = strlen(quick_string.name);
quick_string.hash = 0;
path.dentry = d_alloc(root, &quick_string);
if (!path.dentry)
goto out_shm_unlock;
path.mnt = mntget(hugetlbfs_vfsmount);
error = -ENOSPC;
inode = hugetlbfs_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0);
if (!inode)
goto out_dentry;
error = -ENOMEM;
if (hugetlb_reserve_pages(inode, 0,
size >> huge_page_shift(hstate_inode(inode)), NULL,
acctflag))
goto out_inode;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode);
error = -ENFILE;
file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&hugetlbfs_file_operations);
if (!file)
goto out_dentry; /* inode is already attached */
return file;
out_inode:
iput(inode);
out_dentry:
path_put(&path);
out_shm_unlock:
if (*user) {
user_shm_unlock(size, *user);
*user = NULL;
}
return ERR_PTR(error);
}
static int __init init_hugetlbfs_fs(void)
{
int error;
struct vfsmount *vfsmount;
error = bdi_init(&hugetlbfs_backing_dev_info);
if (error)
return error;
hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
sizeof(struct hugetlbfs_inode_info),
0, 0, init_once);
if (hugetlbfs_inode_cachep == NULL)
goto out2;
error = register_filesystem(&hugetlbfs_fs_type);
if (error)
goto out;
vfsmount = kern_mount(&hugetlbfs_fs_type);
if (!IS_ERR(vfsmount)) {
hugetlbfs_vfsmount = vfsmount;
return 0;
}
error = PTR_ERR(vfsmount);
out:
if (error)
kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
bdi_destroy(&hugetlbfs_backing_dev_info);
return error;
}
static void __exit exit_hugetlbfs_fs(void)
{
kmem_cache_destroy(hugetlbfs_inode_cachep);
kern_unmount(hugetlbfs_vfsmount);
unregister_filesystem(&hugetlbfs_fs_type);
bdi_destroy(&hugetlbfs_backing_dev_info);
}
module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)
MODULE_LICENSE("GPL");
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3640_0 |
crossvul-cpp_data_good_3486_11 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995 - 2000 by Ralf Baechle
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
#include <linux/kdebug.h>
/*
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long write,
unsigned long address)
{
struct vm_area_struct * vma = NULL;
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
const int field = sizeof(unsigned long) * 2;
siginfo_t info;
int fault;
#if 0
printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
current->comm, current->pid, field, address, write,
field, regs->cp0_epc);
#endif
#ifdef CONFIG_KPROBES
/*
* This is to notify the fault handler of the kprobes. The
* exception code is redundant as it is also carried in REGS,
* but we pass it anyhow.
*/
if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
(regs->cp0_cause >> 2) & 0x1f, SIGSEGV) == NOTIFY_STOP)
return;
#endif
info.si_code = SEGV_MAPERR;
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
*
* NOTE! We MUST NOT take any locks for this case. We may
* be in an interrupt or a critical region, and should
* only copy the information from the master page table,
* nothing more.
*/
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
if (unlikely(address >= MODULE_START && address < MODULE_END))
goto VMALLOC_FAULT_TARGET;
#endif
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if (!vma)
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
* we can handle it..
*/
good_area:
info.si_code = SEGV_ACCERR;
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if (kernel_uses_smartmips_rixi) {
if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
if (!(vma->vm_flags & VM_READ)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
current->comm, current->pid,
field, address, write,
field, regs->cp0_epc);
#endif
goto bad_area;
}
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
}
}
/*
* If for any reason at all we couldn't handle the fault,
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
}
if (fault & VM_FAULT_MAJOR) {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
tsk->maj_flt++;
} else {
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
tsk->min_flt++;
}
up_read(&mm->mmap_sem);
return;
/*
* Something tried to access memory that isn't in our memory map..
* Fix it, but check if it's kernel or user first..
*/
bad_area:
up_read(&mm->mmap_sem);
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.cp0_badvaddr = address;
tsk->thread.error_code = write;
#if 0
printk("do_page_fault() #2: sending SIGSEGV to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
info.si_signo = SIGSEGV;
info.si_errno = 0;
/* info.si_code has been set above */
info.si_addr = (void __user *) address;
force_sig_info(SIGSEGV, &info, tsk);
return;
}
no_context:
/* Are we prepared to handle this kernel fault? */
if (fixup_exception(regs)) {
current->thread.cp0_baduaddr = address;
return;
}
/*
* Oops. The kernel tried to access some bad page. We'll have to
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
"virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
raw_smp_processor_id(), field, address, field, regs->cp0_epc,
field, regs->regs[31]);
die("Oops", regs);
out_of_memory:
/*
 * We ran out of memory, call the OOM killer, and return to userspace
* (which will retry the fault, or kill us if we got oom-killed).
*/
up_read(&mm->mmap_sem);
pagefault_out_of_memory();
return;
do_sigbus:
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
goto no_context;
else
/*
* Send a sigbus, regardless of whether we were in kernel
* or user mode.
*/
#if 0
printk("do_page_fault() #3: sending SIGBUS to %s for "
"invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
tsk->comm,
write ? "write access to" : "read access from",
field, address,
field, (unsigned long) regs->cp0_epc,
field, (unsigned long) regs->regs[31]);
#endif
tsk->thread.cp0_badvaddr = address;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *) address;
force_sig_info(SIGBUS, &info, tsk);
return;
#ifndef CONFIG_64BIT
vmalloc_fault:
{
/*
* Synchronize this task's top level page-table
* with the 'reference' page table.
*
* Do _not_ use "tsk" here. We might be inside
* an interrupt in the middle of a task switch..
*/
int offset = __pgd_offset(address);
pgd_t *pgd, *pgd_k;
pud_t *pud, *pud_k;
pmd_t *pmd, *pmd_k;
pte_t *pte_k;
pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
pgd_k = init_mm.pgd + offset;
if (!pgd_present(*pgd_k))
goto no_context;
set_pgd(pgd, *pgd_k);
pud = pud_offset(pgd, address);
pud_k = pud_offset(pgd_k, address);
if (!pud_present(*pud_k))
goto no_context;
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
if (!pmd_present(*pmd_k))
goto no_context;
set_pmd(pmd, *pmd_k);
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
goto no_context;
return;
}
#endif
}
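/*
 * Sketch: the vmalloc_fault path above only mirrors top-level entries from
 * init_mm.pgd into the faulting CPU's page table; e.g. the first touch of a
 * freshly vmalloc()ed buffer from process context faults once, the pgd/pmd
 * entries are copied here, and the access is then retried transparently.
 */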
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_11 |
crossvul-cpp_data_bad_3468_2 | /*
* Copyright (C) 2009 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
* By default transparent hugepage support is enabled for all mappings
* and khugepaged scans all mappings. Defrag is only invoked by
* khugepaged hugepage allocations and by page faults inside
* MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
* allocations.
*/
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
/* default scan 8*512 pte (or vmas) every 30 second */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse a hugepage if at least one pte is mapped, as
 * would have happened had the vma been large enough at page-fault
 * time.
*/
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);
#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;
/**
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
* @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
* @mm: the mm that this information is valid for
*/
struct mm_slot {
struct hlist_node hash;
struct list_head mm_node;
struct mm_struct *mm;
};
/**
* struct khugepaged_scan - cursor for scanning
* @mm_head: the head of the mm list to scan
* @mm_slot: the current mm_slot we are scanning
* @address: the next address inside that to be scanned
*
* There is only the one khugepaged_scan instance of this cursor structure.
*/
struct khugepaged_scan {
struct list_head mm_head;
struct mm_slot *mm_slot;
unsigned long address;
} khugepaged_scan = {
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
static int set_recommended_min_free_kbytes(void)
{
struct zone *zone;
int nr_zones = 0;
unsigned long recommended_min;
extern int min_free_kbytes;
if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags) &&
!test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags))
return 0;
for_each_populated_zone(zone)
nr_zones++;
/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
recommended_min = pageblock_nr_pages * nr_zones * 2;
/*
* Make sure that on average at least two pageblocks are almost free
* of another type, one for a migratetype to fall back to and a
 * second to avoid subsequent fallbacks of other types. There are 3
* MIGRATE_TYPES we care about.
*/
recommended_min += pageblock_nr_pages * nr_zones *
MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
/* don't ever allow to reserve more than 5% of the lowmem */
recommended_min = min(recommended_min,
(unsigned long) nr_free_buffer_pages() / 20);
recommended_min <<= (PAGE_SHIFT-10);
if (recommended_min > min_free_kbytes)
min_free_kbytes = recommended_min;
setup_per_zone_wmarks();
return 0;
}
late_initcall(set_recommended_min_free_kbytes);
static int start_khugepaged(void)
{
int err = 0;
if (khugepaged_enabled()) {
int wakeup;
if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
err = -ENOMEM;
goto out;
}
mutex_lock(&khugepaged_mutex);
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
if (unlikely(IS_ERR(khugepaged_thread))) {
printk(KERN_ERR
"khugepaged: kthread_run(khugepaged) failed\n");
err = PTR_ERR(khugepaged_thread);
khugepaged_thread = NULL;
}
wakeup = !list_empty(&khugepaged_scan.mm_head);
mutex_unlock(&khugepaged_mutex);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
set_recommended_min_free_kbytes();
} else
/* wakeup to exit */
wake_up_interruptible(&khugepaged_wait);
out:
return err;
}
#ifdef CONFIG_SYSFS
static ssize_t double_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
{
if (test_bit(enabled, &transparent_hugepage_flags)) {
VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
return sprintf(buf, "[always] madvise never\n");
} else if (test_bit(req_madv, &transparent_hugepage_flags))
return sprintf(buf, "always [madvise] never\n");
else
return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
{
if (!memcmp("always", buf,
min(sizeof("always")-1, count))) {
set_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("madvise", buf,
min(sizeof("madvise")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
set_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("never", buf,
min(sizeof("never")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
} else
return -EINVAL;
return count;
}
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t ret;
ret = double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
if (ret > 0) {
int err = start_khugepaged();
if (err)
ret = err;
}
if (ret > 0 &&
(test_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags) ||
test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags)))
set_recommended_min_free_kbytes();
return ret;
}
static struct kobj_attribute enabled_attr =
__ATTR(enabled, 0644, enabled_show, enabled_store);
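/*
 * Example (illustrative): writing "madvise" to
 * /sys/kernel/mm/transparent_hugepage/enabled reaches enabled_store()
 * above, which clears TRANSPARENT_HUGEPAGE_FLAG, sets the REQ_MADV flag
 * and then calls start_khugepaged().
 */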
static ssize_t single_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag)
{
return sprintf(buf, "%d\n",
!!test_bit(flag, &transparent_hugepage_flags));
}
static ssize_t single_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag)
{
unsigned long value;
int ret;
ret = kstrtoul(buf, 10, &value);
if (ret < 0)
return ret;
if (value > 1)
return -EINVAL;
if (value)
set_bit(flag, &transparent_hugepage_flags);
else
clear_bit(flag, &transparent_hugepage_flags);
return count;
}
/*
* Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
* memory just to allocate one more hugepage.
*/
static ssize_t defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
__ATTR(defrag, 0644, defrag_show, defrag_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
&debug_cow_attr.attr,
#endif
NULL,
};
static struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
};
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
unsigned long msecs;
int err;
err = strict_strtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
khugepaged_scan_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
unsigned long msecs;
int err;
err = strict_strtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
khugepaged_alloc_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err;
unsigned long pages;
err = strict_strtoul(buf, 10, &pages);
if (err || !pages || pages > UINT_MAX)
return -EINVAL;
khugepaged_pages_to_scan = pages;
return count;
}
static struct kobj_attribute pages_to_scan_attr =
__ATTR(pages_to_scan, 0644, pages_to_scan_show,
pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
__ATTR(defrag, 0644, khugepaged_defrag_show,
khugepaged_defrag_store);
/*
* max_ptes_none controls if khugepaged should collapse hugepages over
* any unmapped ptes in turn potentially increasing the memory
* footprint of the vmas. When max_ptes_none is 0 khugepaged will not
* reduce the available free memory in the system as it
* runs. Increasing max_ptes_none will instead potentially reduce the
* free memory in the system during the khugepaged scan.
*/
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err;
unsigned long max_ptes_none;
err = strict_strtoul(buf, 10, &max_ptes_none);
if (err || max_ptes_none > HPAGE_PMD_NR-1)
return -EINVAL;
khugepaged_max_ptes_none = max_ptes_none;
return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
khugepaged_max_ptes_none_store);
static struct attribute *khugepaged_attr[] = {
&khugepaged_defrag_attr.attr,
&khugepaged_max_ptes_none_attr.attr,
&pages_to_scan_attr.attr,
&pages_collapsed_attr.attr,
&full_scans_attr.attr,
&scan_sleep_millisecs_attr.attr,
&alloc_sleep_millisecs_attr.attr,
NULL,
};
static struct attribute_group khugepaged_attr_group = {
.attrs = khugepaged_attr,
.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
int err;
#ifdef CONFIG_SYSFS
static struct kobject *hugepage_kobj;
#endif
err = -EINVAL;
if (!has_transparent_hugepage()) {
transparent_hugepage_flags = 0;
goto out;
}
#ifdef CONFIG_SYSFS
err = -ENOMEM;
hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!hugepage_kobj)) {
printk(KERN_ERR "hugepage: failed kobject create\n");
goto out;
}
err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
if (err) {
printk(KERN_ERR "hugepage: failed register hugeage group\n");
goto out;
}
err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
if (err) {
printk(KERN_ERR "hugepage: failed register hugeage group\n");
goto out;
}
#endif
err = khugepaged_slab_init();
if (err)
goto out;
err = mm_slots_hash_init();
if (err) {
khugepaged_slab_free();
goto out;
}
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
transparent_hugepage_flags = 0;
start_khugepaged();
set_recommended_min_free_kbytes();
out:
return err;
}
module_init(hugepage_init)
static int __init setup_transparent_hugepage(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "always")) {
set_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
}
out:
if (!ret)
printk(KERN_WARNING
"transparent_hugepage= cannot parse, ignored\n");
return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
static void prepare_pmd_huge_pte(pgtable_t pgtable,
struct mm_struct *mm)
{
assert_spin_locked(&mm->page_table_lock);
/* FIFO */
if (!mm->pmd_huge_pte)
INIT_LIST_HEAD(&pgtable->lru);
else
list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
mm->pmd_huge_pte = pgtable;
}
static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
return pmd;
}
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd,
struct page *page)
{
int ret = 0;
pgtable_t pgtable;
VM_BUG_ON(!PageCompound(page));
pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable)) {
mem_cgroup_uncharge_page(page);
put_page(page);
return VM_FAULT_OOM;
}
clear_huge_page(page, haddr, HPAGE_PMD_NR);
__SetPageUptodate(page);
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_none(*pmd))) {
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(page);
put_page(page);
pte_free(mm, pgtable);
} else {
pmd_t entry;
entry = mk_pmd(page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
/*
* The spinlocking to take the lru_lock inside
* page_add_new_anon_rmap() acts as a full memory
* barrier to be sure clear_huge_page writes become
* visible after the set_pmd_at() write.
*/
page_add_new_anon_rmap(page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
prepare_pmd_huge_pte(pgtable, mm);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
spin_unlock(&mm->page_table_lock);
}
return ret;
}
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
static inline struct page *alloc_hugepage_vma(int defrag,
struct vm_area_struct *vma,
unsigned long haddr, int nd,
gfp_t extra_gfp)
{
return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
HPAGE_PMD_ORDER, vma, haddr, nd);
}
#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
HPAGE_PMD_ORDER);
}
#endif
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
unsigned int flags)
{
struct page *page;
unsigned long haddr = address & HPAGE_PMD_MASK;
pte_t *pte;
if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma)))
return VM_FAULT_OOM;
page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
put_page(page);
goto out;
}
return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
}
out:
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, if a huge pmd could
* materialize from under us from a different thread.
*/
if (unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma)
{
struct page *src_page;
pmd_t pmd;
pgtable_t pgtable;
int ret;
ret = -ENOMEM;
pgtable = pte_alloc_one(dst_mm, addr);
if (unlikely(!pgtable))
goto out;
spin_lock(&dst_mm->page_table_lock);
spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pmd = *src_pmd;
if (unlikely(!pmd_trans_huge(pmd))) {
pte_free(dst_mm, pgtable);
goto out_unlock;
}
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(&src_mm->page_table_lock);
spin_unlock(&dst_mm->page_table_lock);
pte_free(dst_mm, pgtable);
wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
goto out;
}
src_page = pmd_page(pmd);
VM_BUG_ON(!PageHead(src_page));
get_page(src_page);
page_dup_rmap(src_page);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
prepare_pmd_huge_pte(pgtable, dst_mm);
ret = 0;
out_unlock:
spin_unlock(&src_mm->page_table_lock);
spin_unlock(&dst_mm->page_table_lock);
out:
return ret;
}
/* no "address" argument so destroys page coloring of some arch */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
pgtable_t pgtable;
assert_spin_locked(&mm->page_table_lock);
/* FIFO */
pgtable = mm->pmd_huge_pte;
if (list_empty(&pgtable->lru))
mm->pmd_huge_pte = NULL;
else {
mm->pmd_huge_pte = list_entry(pgtable->lru.next,
struct page, lru);
list_del(&pgtable->lru);
}
return pgtable;
}
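/*
 * Note (sketch): prepare_pmd_huge_pte() and get_pmd_huge_pte() form a
 * per-mm FIFO of preallocated pte page tables: a table deposited when a
 * huge pmd is installed is withdrawn here when that pmd is later split or
 * zapped, so those paths never need to allocate memory.
 */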
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmd, pmd_t orig_pmd,
struct page *page,
unsigned long haddr)
{
pgtable_t pgtable;
pmd_t _pmd;
int ret = 0, i;
struct page **pages;
pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
GFP_KERNEL);
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
goto out;
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
__GFP_OTHER_NODE,
vma, address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_newpage_charge(pages[i], mm,
GFP_KERNEL))) {
if (pages[i])
put_page(pages[i]);
mem_cgroup_uncharge_start();
while (--i >= 0) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages);
ret |= VM_FAULT_OOM;
goto out;
}
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
copy_user_highpage(pages[i], page + i,
haddr + PAGE_SIZE*i, vma);
__SetPageUptodate(pages[i]);
cond_resched();
}
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_free_pages;
VM_BUG_ON(!PageHead(page));
pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = get_pmd_huge_pte(mm);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
entry = mk_pte(pages[i], vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
page_add_new_anon_rmap(pages[i], vma, haddr);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
pte_unmap(pte);
}
kfree(pages);
mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page);
spin_unlock(&mm->page_table_lock);
ret |= VM_FAULT_WRITE;
put_page(page);
out:
return ret;
out_free_pages:
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_start();
for (i = 0; i < HPAGE_PMD_NR; i++) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages);
goto out;
}
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
int ret = 0;
struct page *page, *new_page;
unsigned long haddr;
VM_BUG_ON(!vma->anon_vma);
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_unlock;
page = pmd_page(orig_pmd);
VM_BUG_ON(!PageCompound(page) || !PageHead(page));
haddr = address & HPAGE_PMD_MASK;
if (page_mapcount(page) == 1) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
update_mmu_cache(vma, address, entry);
ret |= VM_FAULT_WRITE;
goto out_unlock;
}
get_page(page);
spin_unlock(&mm->page_table_lock);
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow())
new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
else
new_page = NULL;
if (unlikely(!new_page)) {
count_vm_event(THP_FAULT_FALLBACK);
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
put_page(page);
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
put_page(page);
ret |= VM_FAULT_OOM;
goto out;
}
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
spin_lock(&mm->page_table_lock);
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
} else {
pmd_t entry;
VM_BUG_ON(!PageHead(page));
entry = mk_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
pmdp_clear_flush_notify(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache(vma, address, entry);
page_remove_rmap(page);
put_page(page);
ret |= VM_FAULT_WRITE;
}
out_unlock:
spin_unlock(&mm->page_table_lock);
out:
return ret;
}
struct page *follow_trans_huge_pmd(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmd,
unsigned int flags)
{
struct page *page = NULL;
assert_spin_locked(&mm->page_table_lock);
if (flags & FOLL_WRITE && !pmd_write(*pmd))
goto out;
page = pmd_page(*pmd);
VM_BUG_ON(!PageHead(page));
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
/*
* We should set the dirty bit only for FOLL_WRITE, but
* for now the dirty bit in the pmd is meaningless.
* If the dirty bit ever becomes meaningful and we
* only set it with FOLL_WRITE, an atomic set_bit
* will be required on the pmd to set the young bit,
* instead of the current set_pmd_at.
*/
_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
}
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON(!PageCompound(page));
if (flags & FOLL_GET)
get_page(page);
out:
return page;
}
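/*
 * Worked example of the subpage arithmetic in follow_trans_huge_pmd()
 * above: (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT selects the tail page.
 * A standalone sketch with assumed 4K pages and 2M huge pages (the
 * DEMO_* names are illustrative, not taken from this file):
 */
#if 0
#define DEMO_PAGE_SHIFT 12
#define DEMO_HPAGE_SIZE (1UL << 21)
#define DEMO_HPAGE_MASK (~(DEMO_HPAGE_SIZE - 1))
/* addr == haddr + 0x5000 yields subpage index 5 of the compound page */
static unsigned long demo_subpage_index(unsigned long addr)
{
return (addr & ~DEMO_HPAGE_MASK) >> DEMO_PAGE_SHIFT;
}
#endif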
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd)
{
int ret = 0;
spin_lock(&tlb->mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
spin_unlock(&tlb->mm->page_table_lock);
wait_split_huge_page(vma->anon_vma,
pmd);
} else {
struct page *page;
pgtable_t pgtable;
pgtable = get_pmd_huge_pte(tlb->mm);
page = pmd_page(*pmd);
pmd_clear(pmd);
page_remove_rmap(page);
VM_BUG_ON(page_mapcount(page) < 0);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON(!PageHead(page));
spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
ret = 1;
}
} else
spin_unlock(&tlb->mm->page_table_lock);
return ret;
}
int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec)
{
int ret = 0;
spin_lock(&vma->vm_mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
ret = !pmd_trans_splitting(*pmd);
spin_unlock(&vma->vm_mm->page_table_lock);
if (unlikely(!ret))
wait_split_huge_page(vma->anon_vma, pmd);
else {
/*
* All logical pages in the range are present
* if backed by a huge page.
*/
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
}
} else
spin_unlock(&vma->vm_mm->page_table_lock);
return ret;
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 0;
spin_lock(&mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
spin_unlock(&mm->page_table_lock);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
pmd_t entry;
entry = pmdp_get_and_clear(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
set_pmd_at(mm, addr, pmd, entry);
spin_unlock(&vma->vm_mm->page_table_lock);
flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
ret = 1;
}
} else
spin_unlock(&vma->vm_mm->page_table_lock);
return ret;
}
pmd_t *page_check_address_pmd(struct page *page,
struct mm_struct *mm,
unsigned long address,
enum page_check_address_pmd_flag flag)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd, *ret = NULL;
if (address & ~HPAGE_PMD_MASK)
goto out;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
goto out;
if (pmd_page(*pmd) != page)
goto out;
/*
* split_vma() may create temporary aliased mappings. There is
* no risk as long as all huge pmds are found and have their
* splitting bit set before __split_huge_page_refcount
* runs. Finding the same huge pmd more than once during the
* same rmap walk is not a problem.
*/
if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
pmd_trans_splitting(*pmd))
goto out;
if (pmd_trans_huge(*pmd)) {
VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
!pmd_trans_splitting(*pmd));
ret = pmd;
}
out:
return ret;
}
static int __split_huge_page_splitting(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
int ret = 0;
spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
if (pmd) {
/*
* We can't temporarily set the pmd to null in order
* to split it, the pmd must remain marked huge at all
* times or the VM won't take the pmd_trans_huge paths
* and it won't wait on the anon_vma->root->lock to
* serialize against split_huge_page*.
*/
pmdp_splitting_flush_notify(vma, address, pmd);
ret = 1;
}
spin_unlock(&mm->page_table_lock);
return ret;
}
static void __split_huge_page_refcount(struct page *page)
{
int i;
unsigned long head_index = page->index;
struct zone *zone = page_zone(page);
int zonestat;
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
compound_lock(page);
for (i = 1; i < HPAGE_PMD_NR; i++) {
struct page *page_tail = page + i;
/* tail_page->_count cannot change */
atomic_sub(atomic_read(&page_tail->_count), &page->_count);
BUG_ON(page_count(page) <= 0);
atomic_add(page_mapcount(page) + 1, &page_tail->_count);
BUG_ON(atomic_read(&page_tail->_count) <= 0);
/* after clearing PageTail the gup refcount can be released */
smp_mb();
/*
* retain hwpoison flag of the poisoned tail page:
* this fixes the wrong process being killed on a guest
* machine (KVM) by memory-failure.
*/
page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
(1L << PG_uptodate)));
page_tail->flags |= (1L << PG_dirty);
/*
* 1) clear PageTail before overwriting first_page
* 2) clear PageTail before clearing PageHead for VM_BUG_ON
*/
smp_wmb();
/*
* __split_huge_page_splitting() already set the
* splitting bit in all pmd that could map this
* hugepage, that will ensure no CPU can alter the
* mapcount on the head page. The mapcount is only
* accounted in the head page and it has to be
* transferred to all tail pages in the code below. So
* for this code to be safe, the mapcount can't change
* during the split. But that doesn't mean userland can't
* keep changing and reading the page contents while
* we transfer the mapcount, so the pmd splitting
* status is achieved by setting a reserved bit in the
* pmd, not by clearing the present bit.
*/
BUG_ON(page_mapcount(page_tail));
page_tail->_mapcount = page->_mapcount;
BUG_ON(page_tail->mapping);
page_tail->mapping = page->mapping;
page_tail->index = ++head_index;
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
mem_cgroup_split_huge_fixup(page, page_tail);
lru_add_page_tail(zone, page, page_tail);
}
__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
/*
* A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
* so adjust those appropriately if this page is on the LRU.
*/
if (PageLRU(page)) {
zonestat = NR_LRU_BASE + page_lru(page);
__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
}
ClearPageCompound(page);
compound_unlock(page);
spin_unlock_irq(&zone->lru_lock);
for (i = 1; i < HPAGE_PMD_NR; i++) {
struct page *page_tail = page + i;
BUG_ON(page_count(page_tail) <= 0);
/*
* Tail pages may be freed if there wasn't any mapping,
* e.g. if add_to_swap() is running on an lru page that
* had its mapping zapped. Freeing these pages
* requires taking the lru_lock, so we do the put_page
* of the tail pages after the split is complete.
*/
put_page(page_tail);
}
/*
* Only the head page (now become a regular page) is required
* to be pinned by the caller.
*/
BUG_ON(page_count(page) <= 0);
}
static int __split_huge_page_map(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd, _pmd;
int ret = 0, i;
pgtable_t pgtable;
unsigned long haddr;
spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
if (pmd) {
pgtable = get_pmd_huge_pte(mm);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0, haddr = address; i < HPAGE_PMD_NR;
i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
BUG_ON(PageCompound(page+i));
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (!pmd_write(*pmd))
entry = pte_wrprotect(entry);
else
BUG_ON(page_mapcount(page) != 1);
if (!pmd_young(*pmd))
entry = pte_mkold(entry);
pte = pte_offset_map(&_pmd, haddr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
pte_unmap(pte);
}
mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
/*
* Up to this point the pmd is present and huge and
* userland has the whole access to the hugepage
* during the split (which happens in place). If we
* overwrite the pmd with the not-huge version
* pointing to the pte here (which of course we could
* if all CPUs were bug free), userland could trigger
* a small page size TLB miss on the small sized TLB
* while the hugepage TLB entry is still established
* in the huge TLB. Some CPUs don't like that. See
* http://support.amd.com/us/Processor_TechDocs/41322.pdf,
* Erratum 383 on page 93. Intel should be safe, but
* also warns that it's only safe if the permission
* and cache attributes of the two entries loaded in
* the two TLBs are identical (which should be the case
* here). But it is generally safer to never allow
* small and huge TLB entries for the same virtual
* address to be loaded simultaneously. So instead of
* doing "pmd_populate(); flush_tlb_range();" we first
* mark the current pmd notpresent (atomically because
* here the pmd_trans_huge and pmd_trans_splitting
* must remain set at all times on the pmd until the
* split is complete for this pmd), then we flush the
* SMP TLB and finally we write the non-huge version
* of the pmd entry with pmd_populate.
*/
set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
pmd_populate(mm, pmd, pgtable);
ret = 1;
}
spin_unlock(&mm->page_table_lock);
return ret;
}
/* must be called with anon_vma->root->lock held */
static void __split_huge_page(struct page *page,
struct anon_vma *anon_vma)
{
int mapcount, mapcount2;
struct anon_vma_chain *avc;
BUG_ON(!PageHead(page));
BUG_ON(PageTail(page));
mapcount = 0;
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
if (addr == -EFAULT)
continue;
mapcount += __split_huge_page_splitting(page, vma, addr);
}
/*
* It is critical that new vmas are added to the tail of the
* anon_vma list. This guarantees that if copy_huge_pmd() runs
* and establishes a child pmd before
* __split_huge_page_splitting() freezes the parent pmd (so if
* we fail to prevent copy_huge_pmd() from running until the
* whole __split_huge_page() is complete), we will still see
* the newly established pmd of the child later during the
* walk, to be able to set it as pmd_trans_splitting too.
*/
if (mapcount != page_mapcount(page))
printk(KERN_ERR "mapcount %d page_mapcount %d\n",
mapcount, page_mapcount(page));
BUG_ON(mapcount != page_mapcount(page));
__split_huge_page_refcount(page);
mapcount2 = 0;
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
if (addr == -EFAULT)
continue;
mapcount2 += __split_huge_page_map(page, vma, addr);
}
if (mapcount != mapcount2)
printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
mapcount, mapcount2, page_mapcount(page));
BUG_ON(mapcount != mapcount2);
}
int split_huge_page(struct page *page)
{
struct anon_vma *anon_vma;
int ret = 1;
BUG_ON(!PageAnon(page));
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
goto out;
ret = 0;
if (!PageCompound(page))
goto out_unlock;
BUG_ON(!PageSwapBacked(page));
__split_huge_page(page, anon_vma);
count_vm_event(THP_SPLIT);
BUG_ON(PageCompound(page));
out_unlock:
page_unlock_anon_vma(anon_vma);
out:
return ret;
}
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
switch (advice) {
case MADV_HUGEPAGE:
/*
* Be somewhat over-protective like KSM for now!
*/
if (*vm_flags & (VM_HUGEPAGE |
VM_SHARED | VM_MAYSHARE |
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
VM_MIXEDMAP | VM_SAO))
return -EINVAL;
*vm_flags &= ~VM_NOHUGEPAGE;
*vm_flags |= VM_HUGEPAGE;
/*
* If the vma becomes eligible for khugepaged to scan,
* register it here without waiting for a page fault that
* may not happen any time soon.
*/
if (unlikely(khugepaged_enter_vma_merge(vma)))
return -ENOMEM;
break;
case MADV_NOHUGEPAGE:
/*
* Be somewhat over-protective like KSM for now!
*/
if (*vm_flags & (VM_NOHUGEPAGE |
VM_SHARED | VM_MAYSHARE |
VM_PFNMAP | VM_IO | VM_DONTEXPAND |
VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE |
VM_MIXEDMAP | VM_SAO))
return -EINVAL;
*vm_flags &= ~VM_HUGEPAGE;
*vm_flags |= VM_NOHUGEPAGE;
/*
* Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
* this vma even if we leave the mm registered in khugepaged if
* it got registered before VM_NOHUGEPAGE was set.
*/
break;
}
return 0;
}
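/*
 * Userland view of the madvise interface implemented above -- a minimal
 * sketch, assuming MADV_HUGEPAGE is exposed by <sys/mman.h> on the build
 * host; demo_advise_hugepage() is a hypothetical helper:
 */
#if 0
#include <sys/mman.h>
#include <stdlib.h>
static int demo_advise_hugepage(size_t len)
{
void *buf;
/* a huge pmd can only map a range aligned to the 2M huge page size */
if (posix_memalign(&buf, 2UL << 20, len) != 0)
return -1;
/* ask the kernel to back this range with transparent hugepages */
return madvise(buf, len, MADV_HUGEPAGE);
}
#endif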
static int __init khugepaged_slab_init(void)
{
mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
sizeof(struct mm_slot),
__alignof__(struct mm_slot), 0, NULL);
if (!mm_slot_cache)
return -ENOMEM;
return 0;
}
static void __init khugepaged_slab_free(void)
{
kmem_cache_destroy(mm_slot_cache);
mm_slot_cache = NULL;
}
static inline struct mm_slot *alloc_mm_slot(void)
{
if (!mm_slot_cache) /* initialization failed */
return NULL;
return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}
static inline void free_mm_slot(struct mm_slot *mm_slot)
{
kmem_cache_free(mm_slot_cache, mm_slot);
}
static int __init mm_slots_hash_init(void)
{
mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!mm_slots_hash)
return -ENOMEM;
return 0;
}
#if 0
static void __init mm_slots_hash_free(void)
{
kfree(mm_slots_hash);
mm_slots_hash = NULL;
}
#endif
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
struct hlist_head *bucket;
struct hlist_node *node;
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
hlist_for_each_entry(mm_slot, node, bucket, hash) {
if (mm == mm_slot->mm)
return mm_slot;
}
return NULL;
}
static void insert_to_mm_slots_hash(struct mm_struct *mm,
struct mm_slot *mm_slot)
{
struct hlist_head *bucket;
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
mm_slot->mm = mm;
hlist_add_head(&mm_slot->hash, bucket);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
return atomic_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
int wakeup;
mm_slot = alloc_mm_slot();
if (!mm_slot)
return -ENOMEM;
/* __khugepaged_exit() must not run from under us */
VM_BUG_ON(khugepaged_test_exit(mm));
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
free_mm_slot(mm_slot);
return 0;
}
spin_lock(&khugepaged_mm_lock);
insert_to_mm_slots_hash(mm, mm_slot);
/*
* Insert just behind the scanning cursor, to let the area settle
* down a little.
*/
wakeup = list_empty(&khugepaged_scan.mm_head);
list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
atomic_inc(&mm->mm_count);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
unsigned long hstart, hend;
if (!vma->anon_vma)
/*
* Not yet faulted in so we will register later in the
* page fault if needed.
*/
return 0;
if (vma->vm_file || vma->vm_ops)
/* khugepaged not yet working on file or special mappings */
return 0;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
return khugepaged_enter(vma);
return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
int free = 0;
spin_lock(&khugepaged_mm_lock);
mm_slot = get_mm_slot(mm);
if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
hlist_del(&mm_slot->hash);
list_del(&mm_slot->mm_node);
free = 1;
}
if (free) {
spin_unlock(&khugepaged_mm_lock);
clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
free_mm_slot(mm_slot);
mmdrop(mm);
} else if (mm_slot) {
spin_unlock(&khugepaged_mm_lock);
/*
* This is required to serialize against
* khugepaged_test_exit() (which is guaranteed to run
* under mmap_sem read mode). Stop here until
* khugepaged has finished working on the pagetables
* under the mmap_sem; after we return, all
* pagetables will be destroyed.
*/
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
} else
spin_unlock(&khugepaged_mm_lock);
}
static void release_pte_page(struct page *page)
{
/* 0 stands for page_is_file_cache(page) == false */
dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
unlock_page(page);
putback_lru_page(page);
}
static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
while (--_pte >= pte) {
pte_t pteval = *_pte;
if (!pte_none(pteval))
release_pte_page(pte_page(pteval));
}
}
static void release_all_pte_pages(pte_t *pte)
{
release_pte_pages(pte, pte + HPAGE_PMD_NR);
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
pte_t *pte)
{
struct page *page;
pte_t *_pte;
int referenced = 0, isolated = 0, none = 0;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval)) {
if (++none <= khugepaged_max_ptes_none)
continue;
else {
release_pte_pages(pte, _pte);
goto out;
}
}
if (!pte_present(pteval) || !pte_write(pteval)) {
release_pte_pages(pte, _pte);
goto out;
}
page = vm_normal_page(vma, address, pteval);
if (unlikely(!page)) {
release_pte_pages(pte, _pte);
goto out;
}
VM_BUG_ON(PageCompound(page));
BUG_ON(!PageAnon(page));
VM_BUG_ON(!PageSwapBacked(page));
/* cannot use mapcount: can't collapse if there's a gup pin */
if (page_count(page) != 1) {
release_pte_pages(pte, _pte);
goto out;
}
/*
* We can do it before isolate_lru_page because the
* page can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
*/
if (!trylock_page(page)) {
release_pte_pages(pte, _pte);
goto out;
}
/*
* Isolate the page to avoid collapsing a hugepage
* currently in use by the VM.
*/
if (isolate_lru_page(page)) {
unlock_page(page);
release_pte_pages(pte, _pte);
goto out;
}
/* 0 stands for page_is_file_cache(page) == false */
inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageLRU(page));
/* If there is no mapped pte young don't collapse the page */
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
referenced = 1;
}
if (unlikely(!referenced))
release_all_pte_pages(pte);
else
isolated = 1;
out:
return isolated;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
struct vm_area_struct *vma,
unsigned long address,
spinlock_t *ptl)
{
pte_t *_pte;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
pte_t pteval = *_pte;
struct page *src_page;
if (pte_none(pteval)) {
clear_user_highpage(page, address);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
} else {
src_page = pte_page(pteval);
copy_user_highpage(page, src_page, address, vma);
VM_BUG_ON(page_mapcount(src_page) != 1);
VM_BUG_ON(page_count(src_page) != 2);
release_pte_page(src_page);
/*
* ptl mostly unnecessary, but preempt has to
* be disabled to update the per-cpu stats
* inside page_remove_rmap().
*/
spin_lock(ptl);
/*
* paravirt calls inside pte_clear here are
* superfluous.
*/
pte_clear(vma->vm_mm, address, _pte);
page_remove_rmap(src_page);
spin_unlock(ptl);
free_page_and_swap_cache(src_page);
}
address += PAGE_SIZE;
page++;
}
}
static void collapse_huge_page(struct mm_struct *mm,
unsigned long address,
struct page **hpage,
struct vm_area_struct *vma,
int node)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd, _pmd;
pte_t *pte;
pgtable_t pgtable;
struct page *new_page;
spinlock_t *ptl;
int isolated;
unsigned long hstart, hend;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
VM_BUG_ON(!*hpage);
new_page = *hpage;
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
up_read(&mm->mmap_sem);
return;
}
#else
VM_BUG_ON(*hpage);
/*
* Allocate the page while the vma is still valid and under
* the mmap_sem read mode so there is no memory allocation
* later when we take the mmap_sem in write mode. This is more
* friendly behavior (OTOH it may actually hide bugs) to
* filesystems in userland with daemons allocating memory in
* the userland I/O paths. Allocating memory with the
* mmap_sem in read mode is also a good idea to allow greater
* scalability.
*/
new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
node, __GFP_OTHER_NODE);
if (unlikely(!new_page)) {
up_read(&mm->mmap_sem);
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
return;
}
count_vm_event(THP_COLLAPSE_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
up_read(&mm->mmap_sem);
put_page(new_page);
return;
}
#endif
/* after allocating the hugepage upgrade to mmap_sem write mode */
up_read(&mm->mmap_sem);
/*
* Prevent all access to pagetables with the exception of
* gup_fast (later handled by the ptep_clear_flush) and the VM
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
if (unlikely(khugepaged_test_exit(mm)))
goto out;
vma = find_vma(mm, address);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
goto out;
if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE))
goto out;
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto out;
if (is_vma_temporary_stack(vma))
goto out;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
/* pmd can't go away or become huge under us */
if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
goto out;
anon_vma_lock(vma->anon_vma);
pte = pte_offset_map(pmd, address);
ptl = pte_lockptr(mm, pmd);
spin_lock(&mm->page_table_lock); /* probably unnecessary */
/*
* After this gup_fast can't run anymore. This also removes
* any huge TLB entry from the CPU so we won't allow
* huge and small TLB entries for the same virtual address
* to avoid the risk of CPU bugs in that area.
*/
_pmd = pmdp_clear_flush_notify(vma, address, pmd);
spin_unlock(&mm->page_table_lock);
spin_lock(ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
spin_unlock(ptl);
if (unlikely(!isolated)) {
pte_unmap(pte);
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
set_pmd_at(mm, address, pmd, _pmd);
spin_unlock(&mm->page_table_lock);
anon_vma_unlock(vma->anon_vma);
goto out;
}
/*
* All pages are isolated and locked so anon_vma rmap
* can't run anymore.
*/
anon_vma_unlock(vma->anon_vma);
__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
pte_unmap(pte);
__SetPageUptodate(new_page);
pgtable = pmd_pgtable(_pmd);
VM_BUG_ON(page_count(pgtable) != 1);
VM_BUG_ON(page_mapcount(pgtable) != 0);
_pmd = mk_pmd(new_page, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
_pmd = pmd_mkhuge(_pmd);
/*
* spin_lock() below is not the equivalent of smp_wmb(), so
* this is needed to avoid the copy_huge_page writes to become
* visible after the set_pmd_at() write.
*/
smp_wmb();
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
page_add_new_anon_rmap(new_page, vma, address);
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache(vma, address, _pmd);
prepare_pmd_huge_pte(pgtable, mm);
mm->nr_ptes--;
spin_unlock(&mm->page_table_lock);
#ifndef CONFIG_NUMA
*hpage = NULL;
#endif
khugepaged_pages_collapsed++;
out_up_write:
up_write(&mm->mmap_sem);
return;
out:
mem_cgroup_uncharge_page(new_page);
#ifdef CONFIG_NUMA
put_page(new_page);
#endif
goto out_up_write;
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
struct page **hpage)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte, *_pte;
int ret = 0, referenced = 0, none = 0;
struct page *page;
unsigned long _address;
spinlock_t *ptl;
int node = -1;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
goto out;
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval)) {
if (++none <= khugepaged_max_ptes_none)
continue;
else
goto out_unmap;
}
if (!pte_present(pteval) || !pte_write(pteval))
goto out_unmap;
page = vm_normal_page(vma, _address, pteval);
if (unlikely(!page))
goto out_unmap;
/*
* Choose the node of the first page. This could
* be more sophisticated and look at more pages,
* but isn't for now.
*/
if (node == -1)
node = page_to_nid(page);
VM_BUG_ON(PageCompound(page));
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
goto out_unmap;
/* cannot use mapcount: can't collapse if there's a gup pin */
if (page_count(page) != 1)
goto out_unmap;
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
referenced = 1;
}
if (referenced)
ret = 1;
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret)
/* collapse_huge_page will return with the mmap_sem released */
collapse_huge_page(mm, address, hpage, vma, node);
out:
return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
hlist_del(&mm_slot->hash);
list_del(&mm_slot->mm_node);
/*
* Not strictly needed because the mm exited already.
*
* clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
*/
/* khugepaged_mm_lock actually not necessary for the below */
free_mm_slot(mm_slot);
mmdrop(mm);
}
}
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
struct page **hpage)
{
struct mm_slot *mm_slot;
struct mm_struct *mm;
struct vm_area_struct *vma;
int progress = 0;
VM_BUG_ON(!pages);
VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
else {
mm_slot = list_entry(khugepaged_scan.mm_head.next,
struct mm_slot, mm_node);
khugepaged_scan.address = 0;
khugepaged_scan.mm_slot = mm_slot;
}
spin_unlock(&khugepaged_mm_lock);
mm = mm_slot->mm;
down_read(&mm->mmap_sem);
if (unlikely(khugepaged_test_exit(mm)))
vma = NULL;
else
vma = find_vma(mm, khugepaged_scan.address);
progress++;
for (; vma; vma = vma->vm_next) {
unsigned long hstart, hend;
cond_resched();
if (unlikely(khugepaged_test_exit(mm))) {
progress++;
break;
}
if ((!(vma->vm_flags & VM_HUGEPAGE) &&
!khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE)) {
skip:
progress++;
continue;
}
/* VM_PFNMAP vmas may have vm_ops null but vm_file set */
if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
goto skip;
if (is_vma_temporary_stack(vma))
goto skip;
VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart >= hend)
goto skip;
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
while (khugepaged_scan.address < hend) {
int ret;
cond_resched();
if (unlikely(khugepaged_test_exit(mm)))
goto breakouterloop;
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
ret = khugepaged_scan_pmd(mm, vma,
khugepaged_scan.address,
hpage);
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
progress += HPAGE_PMD_NR;
if (ret)
/* we released mmap_sem so break loop */
goto breakouterloop_mmap_sem;
if (progress >= pages)
goto breakouterloop;
}
}
breakouterloop:
up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:
spin_lock(&khugepaged_mm_lock);
VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
/*
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
*/
if (khugepaged_test_exit(mm) || !vma) {
/*
* Make sure that if mm_users is reaching zero while
* khugepaged runs here, khugepaged_exit will find
* mm_slot not pointing to the exiting mm.
*/
if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
khugepaged_scan.mm_slot = list_entry(
mm_slot->mm_node.next,
struct mm_slot, mm_node);
khugepaged_scan.address = 0;
} else {
khugepaged_scan.mm_slot = NULL;
khugepaged_full_scans++;
}
collect_mm_slot(mm_slot);
}
return progress;
}
static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
khugepaged_enabled();
}
static int khugepaged_wait_event(void)
{
return !list_empty(&khugepaged_scan.mm_head) ||
!khugepaged_enabled();
}
static void khugepaged_do_scan(struct page **hpage)
{
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = khugepaged_pages_to_scan;
barrier(); /* write khugepaged_pages_to_scan to local stack */
while (progress < pages) {
cond_resched();
#ifndef CONFIG_NUMA
if (!*hpage) {
*hpage = alloc_hugepage(khugepaged_defrag());
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
}
count_vm_event(THP_COLLAPSE_ALLOC);
}
#else
if (IS_ERR(*hpage))
break;
#endif
if (unlikely(kthread_should_stop() || freezing(current)))
break;
spin_lock(&khugepaged_mm_lock);
if (!khugepaged_scan.mm_slot)
pass_through_head++;
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
hpage);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
}
}
static void khugepaged_alloc_sleep(void)
{
DEFINE_WAIT(wait);
add_wait_queue(&khugepaged_wait, &wait);
schedule_timeout_interruptible(
msecs_to_jiffies(
khugepaged_alloc_sleep_millisecs));
remove_wait_queue(&khugepaged_wait, &wait);
}
#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
struct page *hpage;
do {
hpage = alloc_hugepage(khugepaged_defrag());
if (!hpage) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) &&
likely(khugepaged_enabled()));
return hpage;
}
#endif
static void khugepaged_loop(void)
{
struct page *hpage;
#ifdef CONFIG_NUMA
hpage = NULL;
#endif
while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
hpage = khugepaged_alloc_hugepage();
if (unlikely(!hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
}
count_vm_event(THP_COLLAPSE_ALLOC);
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
hpage = NULL;
}
#endif
khugepaged_do_scan(&hpage);
#ifndef CONFIG_NUMA
if (hpage)
put_page(hpage);
#endif
try_to_freeze();
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
add_wait_queue(&khugepaged_wait, &wait);
schedule_timeout_interruptible(
msecs_to_jiffies(
khugepaged_scan_sleep_millisecs));
remove_wait_queue(&khugepaged_wait, &wait);
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
}
}
static int khugepaged(void *none)
{
struct mm_slot *mm_slot;
set_freezable();
set_user_nice(current, 19);
/* serialize with start_khugepaged() */
mutex_lock(&khugepaged_mutex);
for (;;) {
mutex_unlock(&khugepaged_mutex);
VM_BUG_ON(khugepaged_thread != current);
khugepaged_loop();
VM_BUG_ON(khugepaged_thread != current);
mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled())
break;
if (unlikely(kthread_should_stop()))
break;
}
spin_lock(&khugepaged_mm_lock);
mm_slot = khugepaged_scan.mm_slot;
khugepaged_scan.mm_slot = NULL;
if (mm_slot)
collect_mm_slot(mm_slot);
spin_unlock(&khugepaged_mm_lock);
khugepaged_thread = NULL;
mutex_unlock(&khugepaged_mutex);
return 0;
}
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
struct page *page;
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(&mm->page_table_lock);
return;
}
page = pmd_page(*pmd);
VM_BUG_ON(!page_count(page));
get_page(page);
spin_unlock(&mm->page_table_lock);
split_huge_page(page);
put_page(page);
BUG_ON(pmd_trans_huge(*pmd));
}
static void split_huge_page_address(struct mm_struct *mm,
unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
return;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return;
/*
* Caller holds the mmap_sem write mode, so a huge pmd cannot
* materialize from under us.
*/
split_huge_page_pmd(mm, pmd);
}
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
/*
* If the new start address isn't hpage aligned and it could
* previously contain a hugepage: check if we need to split
* a huge pmd.
*/
if (start & ~HPAGE_PMD_MASK &&
(start & HPAGE_PMD_MASK) >= vma->vm_start &&
(start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_page_address(vma->vm_mm, start);
/*
* If the new end address isn't hpage aligned and it could
* previously contain a hugepage: check if we need to split
* a huge pmd.
*/
if (end & ~HPAGE_PMD_MASK &&
(end & HPAGE_PMD_MASK) >= vma->vm_start &&
(end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_page_address(vma->vm_mm, end);
/*
* If we're also updating the vma->vm_next->vm_start, if the new
* vm_next->vm_start isn't page aligned and it could previously
* contain a hugepage: check if we need to split a huge pmd.
*/
if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next;
unsigned long nstart = next->vm_start;
nstart += adjust_next << PAGE_SHIFT;
if (nstart & ~HPAGE_PMD_MASK &&
(nstart & HPAGE_PMD_MASK) >= next->vm_start &&
(nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
split_huge_page_address(next->vm_mm, nstart);
}
}
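/*
 * Worked example of the alignment tests above, with an assumed 2M huge
 * page size: an address is hpage aligned iff (addr & ~HPAGE_PMD_MASK)
 * is zero. The DEMO_* names are illustrative, not taken from this file:
 */
#if 0
#define DEMO_HPAGE_SIZE (1UL << 21)
#define DEMO_HPAGE_MASK (~(DEMO_HPAGE_SIZE - 1))
/* 0x200000 -> 0 (aligned); 0x201000 -> 0x1000 (needs a split check) */
static unsigned long demo_hpage_misalignment(unsigned long addr)
{
return addr & ~DEMO_HPAGE_MASK;
}
#endif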
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS IIIII X X EEEEE L %
% SS I X X E L %
% SSS I X EEE L %
% SS I X X E L %
% SSSSS IIIII X X EEEEE LLLLL %
% %
% %
% Read/Write DEC SIXEL Format %
% %
% Software Design %
% Hayaki Saito %
% September 2014 %
% Based on kmiya's sixel (2014-03-28) %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/module.h"
#include "MagickCore/threshold.h"
#include "MagickCore/utility.h"
/*
Definitions
*/
#define SIXEL_PALETTE_MAX 256
#define SIXEL_OUTPUT_PACKET_SIZE 1024
/*
Macros
*/
#define SIXEL_RGB(r, g, b) ((int) (((ssize_t) (r) << 16) + ((g) << 8) + (b)))
#define SIXEL_PALVAL(n,a,m) ((int) (((ssize_t) (n) * (a) + ((m) / 2)) / (m)))
#define SIXEL_XRGB(r,g,b) SIXEL_RGB(SIXEL_PALVAL(r, 255, 100), SIXEL_PALVAL(g, 255, 100), SIXEL_PALVAL(b, 255, 100))
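/*
 * Worked example of the macros above (illustrative): SIXEL color
 * components arrive as percentages (0-100); SIXEL_PALVAL rescales them
 * to 0-255 with rounding, and SIXEL_RGB packs them as 0x00RRGGBB:
 *
 *   SIXEL_PALVAL(50, 255, 100) == (50 * 255 + 50) / 100 == 128
 *   SIXEL_XRGB(100, 0, 0)      == SIXEL_RGB(255, 0, 0)  == 0xFF0000
 */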
/*
Structure declarations.
*/
typedef struct sixel_node {
struct sixel_node *next;
int color;
int left;
int right;
unsigned char *map;
} sixel_node_t;
typedef struct sixel_output {
/* compatibility flags */
/* 0: 7bit terminal,
* 1: 8bit terminal */
unsigned char has_8bit_control;
int save_pixel;
int save_count;
int active_palette;
sixel_node_t *node_top;
sixel_node_t *node_free;
Image *image;
int pos;
unsigned char buffer[1];
} sixel_output_t;
static int const sixel_default_color_table[] = {
SIXEL_XRGB(0, 0, 0), /* 0 Black */
SIXEL_XRGB(20, 20, 80), /* 1 Blue */
SIXEL_XRGB(80, 13, 13), /* 2 Red */
SIXEL_XRGB(20, 80, 20), /* 3 Green */
SIXEL_XRGB(80, 20, 80), /* 4 Magenta */
SIXEL_XRGB(20, 80, 80), /* 5 Cyan */
SIXEL_XRGB(80, 80, 20), /* 6 Yellow */
SIXEL_XRGB(53, 53, 53), /* 7 Gray 50% */
SIXEL_XRGB(26, 26, 26), /* 8 Gray 25% */
SIXEL_XRGB(33, 33, 60), /* 9 Blue* */
SIXEL_XRGB(60, 26, 26), /* 10 Red* */
SIXEL_XRGB(33, 60, 33), /* 11 Green* */
SIXEL_XRGB(60, 33, 60), /* 12 Magenta* */
SIXEL_XRGB(33, 60, 60), /* 13 Cyan* */
SIXEL_XRGB(60, 60, 33), /* 14 Yellow* */
SIXEL_XRGB(80, 80, 80), /* 15 Gray 75% */
};
/*
Forward declarations.
*/
static MagickBooleanType
WriteSIXELImage(const ImageInfo *,Image *,ExceptionInfo *);
static int hue_to_rgb(int n1, int n2, int hue)
{
const int HLSMAX = 100;
if (hue < 0) {
hue += HLSMAX;
}
if (hue > HLSMAX) {
hue -= HLSMAX;
}
if (hue < (HLSMAX / 6)) {
return (n1 + (((n2 - n1) * hue + (HLSMAX / 12)) / (HLSMAX / 6)));
}
if (hue < (HLSMAX / 2)) {
return (n2);
}
if (hue < ((HLSMAX * 2) / 3)) {
return (n1 + (((n2 - n1) * (((HLSMAX * 2) / 3) - hue) + (HLSMAX / 12))/(HLSMAX / 6)));
}
return (n1);
}
static int hls_to_rgb(int hue, int lum, int sat)
{
int R, G, B;
int Magic1, Magic2;
const int RGBMAX = 255;
const int HLSMAX = 100;
if (sat == 0) {
R = G = B = (lum * RGBMAX) / HLSMAX;
} else {
if (lum <= (HLSMAX / 2)) {
Magic2 = (int) (((ssize_t) lum * (HLSMAX + sat) + (HLSMAX / 2)) / HLSMAX);
} else {
Magic2 = (int) (lum + sat - (((ssize_t) lum * sat) + (HLSMAX / 2)) / HLSMAX);
}
Magic1 = 2 * lum - Magic2;
R = (hue_to_rgb(Magic1, Magic2, hue + (HLSMAX / 3)) * RGBMAX + (HLSMAX / 2)) / HLSMAX;
G = (hue_to_rgb(Magic1, Magic2, hue) * RGBMAX + (HLSMAX / 2)) / HLSMAX;
B = (hue_to_rgb(Magic1, Magic2, hue - (HLSMAX / 3)) * RGBMAX + (HLSMAX/2)) / HLSMAX;
}
return SIXEL_RGB(R, G, B);
}
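/*
 * Worked example for hls_to_rgb() above (HLSMAX == 100, RGBMAX == 255):
 * hue == 0, lum == 50, sat == 100 gives Magic2 == (50 * 200 + 50) / 100
 * == 100 and Magic1 == 0, so R == 255, G == 0, B == 0, i.e. fully
 * saturated red, SIXEL_RGB(255, 0, 0).
 */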
static unsigned char *get_params(unsigned char *p, int *param, int *len)
{
int n;
*len = 0;
while (*p != '\0') {
while (*p == ' ' || *p == '\t') {
p++;
}
if (isdigit((int) ((unsigned char) *p))) {
for (n = 0; isdigit((int) ((unsigned char) *p)); p++) {
n = (int) ((ssize_t) n * 10 + (*p - '0'));
}
if (*len < 10) {
param[(*len)++] = n;
}
while (*p == ' ' || *p == '\t') {
p++;
}
if (*p == ';') {
p++;
}
} else if (*p == ';') {
if (*len < 10) {
param[(*len)++] = 0;
}
p++;
} else
break;
}
return p;
}
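/*
 * Usage sketch for get_params() above (illustrative; demo_get_params()
 * is hypothetical): parsing the parameter list of a DCS header such as
 * "0;0;8q" fills param[] with {0, 0, 8}, sets *len to 3, and returns a
 * pointer to the terminating 'q'.
 */
#if 0
static void demo_get_params(void)
{
int param[10], len;
unsigned char buf[] = "0;0;8q";
unsigned char *p = get_params(buf, param, &len);
/* here: len == 3, param[0] == 0, param[1] == 0, param[2] == 8, *p == 'q' */
(void) p;
}
#endif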
/* convert sixel data into indexed pixel bytes and palette data */
MagickBooleanType sixel_decode(Image *image,
unsigned char /* in */ *p, /* sixel bytes */
unsigned char /* out */ **pixels, /* decoded pixels */
size_t /* out */ *pwidth, /* image width */
size_t /* out */ *pheight, /* image height */
unsigned char /* out */ **palette, /* ARGB palette */
size_t /* out */ *ncolors, /* palette size (<= 256) */
ExceptionInfo *exception)
{
int n, i, r, g, b, sixel_vertical_mask, c;
int posision_x, posision_y;
int max_x, max_y;
int attributed_pan, attributed_pad;
int attributed_ph, attributed_pv;
int repeat_count, color_index, max_color_index = 2, background_color_index;
int param[10];
int sixel_palet[SIXEL_PALETTE_MAX];
unsigned char *imbuf, *dmbuf;
int imsx, imsy;
int dmsx, dmsy;
int y;
size_t extent,offset;
extent=strlen((char *) p);
posision_x = posision_y = 0;
max_x = max_y = 0;
attributed_pan = 2;
attributed_pad = 1;
attributed_ph = attributed_pv = 0;
repeat_count = 1;
color_index = 0;
background_color_index = 0;
imsx = 2048;
imsy = 2048;
if (SetImageExtent(image,imsx,imsy,exception) == MagickFalse)
return(MagickFalse);
imbuf = (unsigned char *) AcquireQuantumMemory(imsx , imsy);
if (imbuf == NULL) {
return(MagickFalse);
}
for (n = 0; n < 16; n++) {
sixel_palet[n] = sixel_default_color_table[n];
}
/* colors 16-231 are a 6x6x6 color cube */
for (r = 0; r < 6; r++) {
for (g = 0; g < 6; g++) {
for (b = 0; b < 6; b++) {
sixel_palet[n++] = SIXEL_RGB(r * 51, g * 51, b * 51);
}
}
}
/* colors 232-255 are a grayscale ramp, intentionally leaving out black and white */
for (i = 0; i < 24; i++) {
sixel_palet[n++] = SIXEL_RGB(i * 11, i * 11, i * 11);
}
for (; n < SIXEL_PALETTE_MAX; n++) {
sixel_palet[n] = SIXEL_RGB(255, 255, 255);
}
(void) memset(imbuf, background_color_index, (size_t) imsx * imsy);
while (*p != '\0') {
if ((p[0] == '\033' && p[1] == 'P') || *p == 0x90) {
if (*p == '\033') {
p++;
}
p = get_params(++p, param, &n);
if (*p == 'q') {
p++;
if (n > 0) { /* Pn1 */
switch(param[0]) {
case 0:
case 1:
attributed_pad = 2;
break;
case 2:
attributed_pad = 5;
break;
case 3:
attributed_pad = 4;
break;
case 4:
attributed_pad = 4;
break;
case 5:
attributed_pad = 3;
break;
case 6:
attributed_pad = 3;
break;
case 7:
attributed_pad = 2;
break;
case 8:
attributed_pad = 2;
break;
case 9:
attributed_pad = 1;
break;
}
}
if (n > 2) { /* Pn3 */
if (param[2] == 0) {
param[2] = 10;
}
attributed_pan = (int) (((ssize_t) attributed_pan * param[2]) / 10);
attributed_pad = (int) (((ssize_t) attributed_pad * param[2]) / 10);
if (attributed_pan <= 0) attributed_pan = 1;
if (attributed_pad <= 0) attributed_pad = 1;
}
}
} else if ((p[0] == '\033' && p[1] == '\\') || *p == 0x9C) {
break;
} else if (*p == '"') {
/* DECGRA Set Raster Attributes " Pan; Pad; Ph; Pv */
p = get_params(++p, param, &n);
if (n > 0) attributed_pad = param[0];
if (n > 1) attributed_pan = param[1];
if (n > 2 && param[2] > 0) attributed_ph = param[2];
if (n > 3 && param[3] > 0) attributed_pv = param[3];
if (attributed_pan <= 0) attributed_pan = 1;
if (attributed_pad <= 0) attributed_pad = 1;
if (imsx < attributed_ph || imsy < attributed_pv) {
dmsx = imsx > attributed_ph ? imsx : attributed_ph;
dmsy = imsy > attributed_pv ? imsy : attributed_pv;
if (SetImageExtent(image,dmsx,dmsy,exception) == MagickFalse)
break;
dmbuf = (unsigned char *) AcquireQuantumMemory(dmsx , dmsy);
if (dmbuf == (unsigned char *) NULL) {
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
(void) memset(dmbuf, background_color_index, (size_t) dmsx * dmsy);
for (y = 0; y < imsy; ++y) {
(void) memcpy(dmbuf + dmsx * y, imbuf + imsx * y, imsx);
}
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
imsx = dmsx;
imsy = dmsy;
imbuf = dmbuf;
}
} else if (*p == '!') {
/* DECGRI Graphics Repeat Introducer ! Pn Ch */
p = get_params(++p, param, &n);
if ((n > 0) && (param[0] > 0)) {
repeat_count = param[0];
if (repeat_count > (ssize_t) extent)
break;
}
} else if (*p == '#') {
/* DECGCI Graphics Color Introducer # Pc; Pu; Px; Py; Pz */
p = get_params(++p, param, &n);
if (n > 0) {
if ((color_index = param[0]) < 0) {
color_index = 0;
} else if (color_index >= SIXEL_PALETTE_MAX) {
color_index = SIXEL_PALETTE_MAX - 1;
}
}
if (n > 4) {
if (param[1] == 1) { /* HLS */
if (param[2] > 360) param[2] = 360;
if (param[3] > 100) param[3] = 100;
if (param[4] > 100) param[4] = 100;
sixel_palet[color_index] = hls_to_rgb(param[2] * 100 / 360, param[3], param[4]);
} else if (param[1] == 2) { /* RGB */
if (param[2] > 100) param[2] = 100;
if (param[3] > 100) param[3] = 100;
if (param[4] > 100) param[4] = 100;
sixel_palet[color_index] = SIXEL_XRGB(param[2], param[3], param[4]);
}
}
} else if (*p == '$') {
/* DECGCR Graphics Carriage Return */
p++;
posision_x = 0;
repeat_count = 1;
} else if (*p == '-') {
/* DECGNL Graphics Next Line */
p++;
posision_x = 0;
posision_y += 6;
repeat_count = 1;
} else if (*p >= '?' && *p <= '\177') {
if (imsx < (posision_x + repeat_count) || imsy < (posision_y + 6)) {
int nx = imsx * 2;
int ny = imsy * 2;
while (nx < (posision_x + repeat_count) || ny < (posision_y + 6)) {
nx *= 2;
ny *= 2;
}
dmsx = nx;
dmsy = ny;
if (SetImageExtent(image,dmsx,dmsy,exception) == MagickFalse)
break;
dmbuf = (unsigned char *) AcquireQuantumMemory(dmsx , dmsy);
if (dmbuf == (unsigned char *) NULL) {
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
(void) memset(dmbuf, background_color_index, (size_t) dmsx * dmsy);
for (y = 0; y < imsy; ++y) {
(void) memcpy(dmbuf + dmsx * y, imbuf + imsx * y, imsx);
}
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
imsx = dmsx;
imsy = dmsy;
imbuf = dmbuf;
}
if (color_index > max_color_index) {
max_color_index = color_index;
}
if ((b = *(p++) - '?') == 0) {
posision_x += repeat_count;
} else {
sixel_vertical_mask = 0x01;
if (repeat_count <= 1) {
for (i = 0; i < 6; i++) {
if ((b & sixel_vertical_mask) != 0) {
offset=(size_t) imsx * (posision_y + i) + posision_x;
if (offset >= (size_t) imsx * imsy)
{
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
imbuf[offset] = color_index;
if (max_x < posision_x) {
max_x = posision_x;
}
if (max_y < (posision_y + i)) {
max_y = posision_y + i;
}
}
sixel_vertical_mask <<= 1;
}
posision_x += 1;
} else { /* repeat_count > 1 */
for (i = 0; i < 6; i++) {
if ((b & sixel_vertical_mask) != 0) {
c = sixel_vertical_mask << 1;
for (n = 1; (i + n) < 6; n++) {
if ((b & c) == 0) {
break;
}
c <<= 1;
}
for (y = posision_y + i; y < posision_y + i + n; ++y) {
offset=(size_t) imsx * y + posision_x;
if (offset + repeat_count >= (size_t) imsx * imsy)
{
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
(void) memset(imbuf + offset, color_index, repeat_count);
}
if (max_x < (posision_x + repeat_count - 1)) {
max_x = posision_x + repeat_count - 1;
}
if (max_y < (posision_y + i + n - 1)) {
max_y = posision_y + i + n - 1;
}
i += (n - 1);
sixel_vertical_mask <<= (n - 1);
}
sixel_vertical_mask <<= 1;
}
posision_x += repeat_count;
}
}
repeat_count = 1;
} else {
p++;
}
}
if (++max_x < attributed_ph) {
max_x = attributed_ph;
}
if (++max_y < attributed_pv) {
max_y = attributed_pv;
}
if (imsx > max_x || imsy > max_y) {
dmsx = max_x;
dmsy = max_y;
if (SetImageExtent(image,dmsx,dmsy,exception) == MagickFalse)
{
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
if ((dmbuf = (unsigned char *) AcquireQuantumMemory(dmsx , dmsy)) == NULL) {
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
return (MagickFalse);
}
for (y = 0; y < dmsy; ++y) {
(void) memcpy(dmbuf + dmsx * y, imbuf + imsx * y, dmsx);
}
imbuf = (unsigned char *) RelinquishMagickMemory(imbuf);
imsx = dmsx;
imsy = dmsy;
imbuf = dmbuf;
}
*pixels = imbuf;
*pwidth = imsx;
*pheight = imsy;
*ncolors = max_color_index + 1;
*palette = (unsigned char *) AcquireQuantumMemory(*ncolors,4);
if (*palette == (unsigned char *) NULL)
return(MagickFalse);
for (n = 0; n < (ssize_t) *ncolors; ++n) {
(*palette)[n * 4 + 0] = sixel_palet[n] >> 16 & 0xff;
(*palette)[n * 4 + 1] = sixel_palet[n] >> 8 & 0xff;
(*palette)[n * 4 + 2] = sixel_palet[n] & 0xff;
(*palette)[n * 4 + 3] = 0xff;
}
return(MagickTrue);
}
sixel_output_t *sixel_output_create(Image *image)
{
sixel_output_t *output;
output = (sixel_output_t *) AcquireQuantumMemory(sizeof(sixel_output_t) + SIXEL_OUTPUT_PACKET_SIZE * 2, 1);
if (output == (sixel_output_t *) NULL)
return((sixel_output_t *) NULL);
output->has_8bit_control = 0;
output->save_pixel = 0;
output->save_count = 0;
output->active_palette = (-1);
output->node_top = NULL;
output->node_free = NULL;
output->image = image;
output->pos = 0;
return output;
}
static void sixel_advance(sixel_output_t *context, int nwrite)
{
if ((context->pos += nwrite) >= SIXEL_OUTPUT_PACKET_SIZE) {
WriteBlob(context->image,SIXEL_OUTPUT_PACKET_SIZE,context->buffer);
memmove(context->buffer,
context->buffer + SIXEL_OUTPUT_PACKET_SIZE,
(context->pos -= SIXEL_OUTPUT_PACKET_SIZE));
}
}
static int sixel_put_flash(sixel_output_t *const context)
{
int n;
int nwrite;
#if defined(USE_VT240) /* VT240 Max 255 ? */
while (context->save_count > 255) {
nwrite = sprintf((char *)context->buffer + context->pos, "!255%c", context->save_pixel);
if (nwrite <= 0) {
return (-1);
}
sixel_advance(context, nwrite);
context->save_count -= 255;
}
#endif /* defined(USE_VT240) */
if (context->save_count > 3) {
/* DECGRI Graphics Repeat Introducer ! Pn Ch */
nwrite = sprintf((char *)context->buffer + context->pos, "!%d%c", context->save_count, context->save_pixel);
if (nwrite <= 0) {
return (-1);
}
sixel_advance(context, nwrite);
} else {
for (n = 0; n < context->save_count; n++) {
context->buffer[context->pos] = (char)context->save_pixel;
sixel_advance(context, 1);
}
}
context->save_pixel = 0;
context->save_count = 0;
return 0;
}
static void sixel_put_pixel(sixel_output_t *const context, int pix)
{
if (pix < 0 || pix > '?') {
pix = 0;
}
pix += '?';
if (pix == context->save_pixel) {
context->save_count++;
} else {
sixel_put_flash(context);
context->save_pixel = pix;
context->save_count = 1;
}
}
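/*
 * Encoding example for the run-length logic above (illustrative):
 * palette index 5 maps to the sixel character '?' + 5 == 'D'. Twelve
 * consecutive calls with pix == 5 accumulate save_count == 12, and the
 * next flush emits the repeat introducer "!12D" instead of the twelve
 * literal bytes "DDDDDDDDDDDD".
 */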
static void sixel_node_del(sixel_output_t *const context, sixel_node_t *np)
{
sixel_node_t *tp;
if ((tp = context->node_top) == np) {
context->node_top = np->next;
}
else {
while (tp->next != NULL) {
if (tp->next == np) {
tp->next = np->next;
break;
}
tp = tp->next;
}
}
np->next = context->node_free;
context->node_free = np;
}
static int sixel_put_node(sixel_output_t *const context, int x,
sixel_node_t *np, int ncolors, int keycolor)
{
int nwrite;
if (ncolors != 2 || keycolor == -1) {
/* designate palette index */
if (context->active_palette != np->color) {
nwrite = sprintf((char *)context->buffer + context->pos,
"#%d", np->color);
sixel_advance(context, nwrite);
context->active_palette = np->color;
}
}
for (; x < np->left; x++) {
sixel_put_pixel(context, 0);
}
for (; x < np->right; x++) {
sixel_put_pixel(context, np->map[x]);
}
sixel_put_flash(context);
return x;
}
static MagickBooleanType sixel_encode_impl(unsigned char *pixels, size_t width,size_t height,
unsigned char *palette, size_t ncolors, int keycolor,
sixel_output_t *context)
{
#define RelinquishNodesAndMap \
while ((np = context->node_free) != NULL) { \
context->node_free = np->next; \
np=(sixel_node_t *) RelinquishMagickMemory(np); \
} \
map = (unsigned char *) RelinquishMagickMemory(map)
int x, y, i, n, c;
int left, right;
int pix;
unsigned char *map;
sixel_node_t *np, *tp, top;
int nwrite;
size_t len;
context->pos = 0;
if (ncolors < 1) {
return (MagickFalse);
}
len = ncolors * width;
context->active_palette = (-1);
if ((map = (unsigned char *)AcquireQuantumMemory(len, sizeof(unsigned char))) == NULL) {
return (MagickFalse);
}
(void) memset(map, 0, len);
if (context->has_8bit_control) {
nwrite = sprintf((char *)context->buffer, "\x90" "0;0;0" "q");
} else {
nwrite = sprintf((char *)context->buffer, "\x1bP" "0;0;0" "q");
}
if (nwrite <= 0) {
RelinquishNodesAndMap;
return (MagickFalse);
}
sixel_advance(context, nwrite);
nwrite = sprintf((char *)context->buffer + context->pos, "\"1;1;%d;%d", (int) width, (int) height);
if (nwrite <= 0) {
RelinquishNodesAndMap;
return (MagickFalse);
}
sixel_advance(context, nwrite);
if (ncolors != 2 || keycolor == -1) {
for (n = 0; n < (ssize_t) ncolors; n++) {
/* DECGCI Graphics Color Introducer # Pc ; Pu; Px; Py; Pz */
nwrite = sprintf((char *)context->buffer + context->pos, "#%d;2;%d;%d;%d",
n,
(palette[n * 3 + 0] * 100 + 127) / 255,
(palette[n * 3 + 1] * 100 + 127) / 255,
(palette[n * 3 + 2] * 100 + 127) / 255);
if (nwrite <= 0) {
RelinquishNodesAndMap;
return (MagickFalse);
}
sixel_advance(context, nwrite);
}
}
for (y = i = 0; y < (ssize_t) height; y++) {
for (x = 0; x < (ssize_t) width; x++) {
pix = pixels[y * width + x];
if (pix >= 0 && pix < (ssize_t) ncolors && pix != keycolor) {
map[pix * width + x] |= (1 << i);
}
}
if (++i < 6 && (y + 1) < (ssize_t) height) {
continue;
}
for (c = 0; c < (ssize_t) ncolors; c++) {
for (left = 0; left < (ssize_t) width; left++) {
if (*(map + c * width + left) == 0) {
continue;
}
for (right = left + 1; right < (ssize_t) width; right++) {
if (*(map + c * width + right) != 0) {
continue;
}
for (n = 1; (right + n) < (ssize_t) width; n++) {
if (*(map + c * width + right + n) != 0) {
break;
}
}
if (n >= 10 || right + n >= (ssize_t) width) {
break;
}
right = right + n - 1;
}
if ((np = context->node_free) != NULL) {
context->node_free = np->next;
} else if ((np = (sixel_node_t *)AcquireMagickMemory(sizeof(sixel_node_t))) == NULL) {
RelinquishNodesAndMap;
return (MagickFalse);
}
np->color = c;
np->left = left;
np->right = right;
np->map = map + c * width;
top.next = context->node_top;
tp = &top;
while (tp->next != NULL) {
if (np->left < tp->next->left) {
break;
}
if (np->left == tp->next->left && np->right > tp->next->right) {
break;
}
tp = tp->next;
}
np->next = tp->next;
tp->next = np;
context->node_top = top.next;
left = right - 1;
}
}
for (x = 0; (np = context->node_top) != NULL;) {
if (x > np->left) {
/* DECGCR Graphics Carriage Return */
context->buffer[context->pos] = '$';
sixel_advance(context, 1);
x = 0;
}
x = sixel_put_node(context, x, np, (int) ncolors, keycolor);
sixel_node_del(context, np);
np = context->node_top;
while (np != NULL) {
if (np->left < x) {
np = np->next;
continue;
}
x = sixel_put_node(context, x, np, (int) ncolors, keycolor);
sixel_node_del(context, np);
np = context->node_top;
}
}
/* DECGNL Graphics Next Line */
context->buffer[context->pos] = '-';
sixel_advance(context, 1);
i = 0;
(void) memset(map, 0, len);
}
if (context->has_8bit_control) {
context->buffer[context->pos] = 0x9c;
sixel_advance(context, 1);
} else {
context->buffer[context->pos] = 0x1b;
context->buffer[context->pos + 1] = '\\';
sixel_advance(context, 2);
}
/* flush buffer */
if (context->pos > 0) {
WriteBlob(context->image,context->pos,context->buffer);
}
RelinquishNodesAndMap;
return(MagickTrue);
}
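/*
  The sketch below (hypothetical, compiled out) restates the DECGCI palette
  scaling used in sixel_encode_impl() above: 8-bit channel values map to the
  0..100 range SIXEL expects, and the "+127" rounds to nearest instead of
  truncating. The helper name is illustrative only.
*/
#if 0
static int scale_channel_to_sixel_percent(const unsigned char channel)
{
  /* 0 -> 0, 128 -> 50, 255 -> 100 */
  return (((int) channel)*100+127)/255;
}
#endif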
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s S I X E L %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsSIXEL() returns MagickTrue if the image format type, identified by the
% magick string, is SIXEL.
%
% The format of the IsSIXEL method is:
%
% MagickBooleanType IsSIXEL(const unsigned char *magick,
% const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsSIXEL(const unsigned char *magick,
const size_t length)
{
const unsigned char
*end = magick + length;
if (length < 3)
return(MagickFalse);
if (*magick == 0x90 || (*magick == 0x1b && *++magick == 'P')) {
while (++magick != end) {
if (*magick == 'q')
return(MagickTrue);
if (!(*magick >= '0' && *magick <= '9') && *magick != ';')
return(MagickFalse);
}
}
return(MagickFalse);
}
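/*
  Hypothetical test vector (compiled out): a minimal 7-bit SIXEL header that
  IsSIXEL() above accepts -- ESC 'P', DCS parameter digits separated by ';',
  then the mandatory 'q' introducer.
*/
#if 0
static const unsigned char sixel_header_example[] =
  { 0x1B, 'P', '0', ';', '0', ';', '8', 'q' };
#endif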
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d S I X E L I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadSIXELImage() reads a SIXEL image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadSIXELImage method is:
%
% Image *ReadSIXELImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadSIXELImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
char
*sixel_buffer;
Image
*image;
MagickBooleanType
status;
register char
*p;
register ssize_t
x;
register Quantum
*q;
size_t
length;
ssize_t
i,
j,
y;
unsigned char
*sixel_pixels,
*sixel_palette;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Read SIXEL file.
*/
length=MagickPathExtent;
sixel_buffer=(char *) AcquireQuantumMemory((size_t) length+MagickPathExtent,
sizeof(*sixel_buffer));
p=sixel_buffer;
if (sixel_buffer != (char *) NULL)
while (ReadBlobString(image,p) != (char *) NULL)
{
if ((*p == '#') && ((p == sixel_buffer) || (*(p-1) == '\n')))
continue;
if ((*p == '}') && (*(p+1) == ';'))
break;
p+=strlen(p);
if ((size_t) (p-sixel_buffer+MagickPathExtent+1) < length)
continue;
length<<=1;
sixel_buffer=(char *) ResizeQuantumMemory(sixel_buffer,length+
MagickPathExtent+1,sizeof(*sixel_buffer));
if (sixel_buffer == (char *) NULL)
break;
p=sixel_buffer+strlen(sixel_buffer);
}
if (sixel_buffer == (char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
sixel_buffer[length]='\0';
/*
Decode SIXEL
*/
if (sixel_decode(image,(unsigned char *) sixel_buffer,&sixel_pixels,&image->columns,&image->rows,&sixel_palette,&image->colors,exception) == MagickFalse)
{
sixel_buffer=(char *) RelinquishMagickMemory(sixel_buffer);
ThrowReaderException(CorruptImageError,"CorruptImage");
}
sixel_buffer=(char *) RelinquishMagickMemory(sixel_buffer);
image->depth=24;
image->storage_class=PseudoClass;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
{
sixel_pixels=(unsigned char *) RelinquishMagickMemory(sixel_pixels);
sixel_palette=(unsigned char *) RelinquishMagickMemory(sixel_palette);
return(DestroyImageList(image));
}
if (AcquireImageColormap(image,image->colors, exception) == MagickFalse)
{
sixel_pixels=(unsigned char *) RelinquishMagickMemory(sixel_pixels);
sixel_palette=(unsigned char *) RelinquishMagickMemory(sixel_palette);
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
}
for (i = 0; i < (ssize_t) image->colors; ++i) {
image->colormap[i].red = ScaleCharToQuantum(sixel_palette[i * 4 + 0]);
image->colormap[i].green = ScaleCharToQuantum(sixel_palette[i * 4 + 1]);
image->colormap[i].blue = ScaleCharToQuantum(sixel_palette[i * 4 + 2]);
}
j=0;
if (image_info->ping == MagickFalse)
{
/*
Read image pixels.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
j=(ssize_t) sixel_pixels[y * image->columns + x];
SetPixelIndex(image,j,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
}
if (y < (ssize_t) image->rows)
{
sixel_pixels=(unsigned char *) RelinquishMagickMemory(sixel_pixels);
sixel_palette=(unsigned char *) RelinquishMagickMemory(sixel_palette);
ThrowReaderException(CorruptImageError,"NotEnoughPixelData");
}
}
/*
Relinquish resources.
*/
sixel_pixels=(unsigned char *) RelinquishMagickMemory(sixel_pixels);
sixel_palette=(unsigned char *) RelinquishMagickMemory(sixel_palette);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r S I X E L I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterSIXELImage() adds attributes for the SIXEL image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterSIXELImage method is:
%
% size_t RegisterSIXELImage(void)
%
*/
ModuleExport size_t RegisterSIXELImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("SIXEL","SIXEL","DEC SIXEL Graphics Format");
entry->decoder=(DecodeImageHandler *) ReadSIXELImage;
entry->encoder=(EncodeImageHandler *) WriteSIXELImage;
entry->magick=(IsImageFormatHandler *) IsSIXEL;
entry->flags^=CoderAdjoinFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("SIXEL","SIX","DEC SIXEL Graphics Format");
entry->decoder=(DecodeImageHandler *) ReadSIXELImage;
entry->encoder=(EncodeImageHandler *) WriteSIXELImage;
entry->magick=(IsImageFormatHandler *) IsSIXEL;
entry->flags^=CoderAdjoinFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r S I X E L I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterSIXELImage() removes format registrations made by the
% SIXEL module from the list of supported formats.
%
% The format of the UnregisterSIXELImage method is:
%
% UnregisterSIXELImage(void)
%
*/
ModuleExport void UnregisterSIXELImage(void)
{
(void) UnregisterMagickInfo("SIXEL");
(void) UnregisterMagickInfo("SIX");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e S I X E L I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteSIXELImage() writes an image to a file in the SIXEL format.
%
% The format of the WriteSIXELImage method is:
%
% MagickBooleanType WriteSIXELImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteSIXELImage(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
MagickBooleanType
status;
register const Quantum
*q;
register ssize_t
i,
x;
ssize_t
opacity,
y;
sixel_output_t
*output;
unsigned char
sixel_palette[256*3],
*sixel_pixels;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
opacity=(-1);
if (image->alpha_trait == UndefinedPixelTrait)
{
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteType,exception);
}
else
{
MagickRealType
alpha,
beta;
/*
Identify transparent colormap index.
*/
if ((image->storage_class == DirectClass) || (image->colors > 256))
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=image->colormap[i].alpha;
beta=image->colormap[opacity].alpha;
if (alpha < beta)
opacity=i;
}
if (opacity == -1)
{
(void) SetImageType(image,PaletteBilevelAlphaType,exception);
for (i=0; i < (ssize_t) image->colors; i++)
if (image->colormap[i].alpha != OpaqueAlpha)
{
if (opacity < 0)
{
opacity=i;
continue;
}
alpha=image->colormap[i].alpha;
beta=image->colormap[opacity].alpha;
if (alpha < beta)
opacity=i;
}
}
if (opacity >= 0)
{
image->colormap[opacity].red=image->transparent_color.red;
image->colormap[opacity].green=image->transparent_color.green;
image->colormap[opacity].blue=image->transparent_color.blue;
}
}
/*
SIXEL header.
*/
for (i=0; i < (ssize_t) image->colors; i++)
{
sixel_palette[3*i+0]=ScaleQuantumToChar(image->colormap[i].red);
sixel_palette[3*i+1]=ScaleQuantumToChar(image->colormap[i].green);
sixel_palette[3*i+2]=ScaleQuantumToChar(image->colormap[i].blue);
}
/*
Define SIXEL pixels.
*/
output = sixel_output_create(image);
if (output == (sixel_output_t *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
sixel_pixels=(unsigned char *) AcquireQuantumMemory(image->columns,
image->rows*sizeof(*sixel_pixels));
if (sixel_pixels == (unsigned char *) NULL)
{
output = (sixel_output_t *) RelinquishMagickMemory(output);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
for (y=0; y < (ssize_t) image->rows; y++)
{
q=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (q == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
sixel_pixels[y*image->columns+x]= ((ssize_t) GetPixelIndex(image,q));
q+=GetPixelChannels(image);
}
}
status = sixel_encode_impl(sixel_pixels,image->columns,image->rows,
sixel_palette,image->colors,-1,output);
sixel_pixels=(unsigned char *) RelinquishMagickMemory(sixel_pixels);
output=(sixel_output_t *) RelinquishMagickMemory(output);
(void) CloseBlob(image);
return(status);
}
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Florian La Roche, <flla@stud.uni-sb.de>
* Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
* Linus Torvalds, <torvalds@cs.helsinki.fi>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Matthew Dillon, <dillon@apollo.west.oic.com>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Jorge Cwik, <jorge@laser.satlink.net>
*/
/*
* Changes:
* Pedro Roque : Fast Retransmit/Recovery.
* Two receive queues.
* Retransmit queue handled by TCP.
* Better retransmit timer handling.
* New congestion avoidance.
* Header prediction.
* Variable renaming.
*
* Eric : Fast Retransmit.
* Randy Scott : MSS option defines.
* Eric Schenk : Fixes to slow start algorithm.
* Eric Schenk : Yet another double ACK bug.
* Eric Schenk : Delayed ACK bug fixes.
* Eric Schenk : Floyd style fast retrans war avoidance.
* David S. Miller : Don't allow zero congestion window.
* Eric Schenk : Fix retransmitter so that it sends
* next packet on ack of previous packet.
* Andi Kleen : Moved open_request checking here
* and process RSTs for open_requests.
* Andi Kleen : Better prune_queue, and other fixes.
* Andrey Savochkin: Fix RTT measurements in the presence of
* timestamps.
* Andrey Savochkin: Check sequence numbers correctly when
* removing SACKs due to in sequence incoming
* data segments.
* Andi Kleen: Make sure we never ack data there is not
* enough room for. Also make this condition
* a fatal error if it might still happen.
* Andi Kleen: Add tcp_measure_rcv_mss to make
* connections with MSS<min(MTU,ann. MSS)
* work without delayed acks.
* Andi Kleen: Process packets with PSH set in the
* fast path.
* J Hadi Salim: ECN support
* Andrei Gurtov,
* Pasi Sarolahti,
* Panu Kuhlberg: Experimental audit of TCP (re)transmission
* engine. Lots of bugs are found.
* Pasi Sarolahti: F-RTO for dealing with spurious RTOs
*/
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <net/netdma.h>
int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
int sysctl_tcp_fack __read_mostly = 1;
int sysctl_tcp_reordering __read_mostly = TCP_FASTRETRANS_THRESH;
EXPORT_SYMBOL(sysctl_tcp_reordering);
int sysctl_tcp_ecn __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_ecn);
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
int sysctl_tcp_adv_win_scale __read_mostly = 2;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_frto_response __read_mostly;
int sysctl_tcp_nometrics_save __read_mostly;
int sysctl_tcp_thin_dupack __read_mostly;
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_abc __read_mostly;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
#define FLAG_DATA_ACKED 0x04 /* This ACK acknowledged new data. */
#define FLAG_RETRANS_DATA_ACKED 0x08 /* "" "" some of which was retransmitted. */
#define FLAG_SYN_ACKED 0x10 /* This ACK acknowledged SYN. */
#define FLAG_DATA_SACKED 0x20 /* New SACK. */
#define FLAG_ECE 0x40 /* ECE in this ACK */
#define FLAG_DATA_LOST 0x80 /* SACK detected data lossage. */
#define FLAG_SLOWPATH 0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ONLY_ORIG_SACKED 0x200 /* SACKs only non-rexmit sent before RTO */
#define FLAG_SND_UNA_ADVANCED 0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */
#define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */
#define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED)
#define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
#define FLAG_CA_ALERT (FLAG_DATA_SACKED|FLAG_ECE)
#define FLAG_FORWARD_PROGRESS (FLAG_ACKED|FLAG_DATA_SACKED)
#define FLAG_ANY_PROGRESS (FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))
/* Adapt the MSS value used to make delayed ack decision to the
* real world.
*/
static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
{
struct inet_connection_sock *icsk = inet_csk(sk);
const unsigned int lss = icsk->icsk_ack.last_seg_size;
unsigned int len;
icsk->icsk_ack.last_seg_size = 0;
/* skb->len may jitter because of SACKs, even if peer
* sends good full-sized frames.
*/
len = skb_shinfo(skb)->gso_size ? : skb->len;
if (len >= icsk->icsk_ack.rcv_mss) {
icsk->icsk_ack.rcv_mss = len;
} else {
/* Otherwise, we make more careful check taking into account,
* that SACKs block is variable.
*
* "len" is invariant segment length, including TCP header.
*/
len += skb->data - skb_transport_header(skb);
if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) ||
/* If PSH is not set, packet should be
* full sized, provided peer TCP is not badly broken.
* This observation (if it is correct 8)) allows us
* to handle super-low mtu links fairly.
*/
(len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
!(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
/* Subtract also invariant (if peer is RFC compliant),
* tcp header plus fixed timestamp option length.
* Resulting "len" is MSS free of SACK jitter.
*/
len -= tcp_sk(sk)->tcp_header_len;
icsk->icsk_ack.last_seg_size = len;
if (len == lss) {
icsk->icsk_ack.rcv_mss = len;
return;
}
}
if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2;
icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
}
}
static void tcp_incr_quickack(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
if (quickacks == 0)
quickacks = 2;
if (quickacks > icsk->icsk_ack.quick)
icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
}
static void tcp_enter_quickack_mode(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_incr_quickack(sk);
icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
/* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive.
*/
static inline int tcp_in_quickack_mode(const struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
return icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong;
}
static inline void TCP_ECN_queue_cwr(struct tcp_sock *tp)
{
if (tp->ecn_flags & TCP_ECN_OK)
tp->ecn_flags |= TCP_ECN_QUEUE_CWR;
}
static inline void TCP_ECN_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb)
{
if (tcp_hdr(skb)->cwr)
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
static inline void TCP_ECN_withdraw_cwr(struct tcp_sock *tp)
{
tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}
static inline void TCP_ECN_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
{
if (!(tp->ecn_flags & TCP_ECN_OK))
return;
switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
case INET_ECN_NOT_ECT:
/* Funny extension: if ECT is not set on a segment,
* and we already seen ECT on a previous segment,
* it is probably a retransmit.
*/
if (tp->ecn_flags & TCP_ECN_SEEN)
tcp_enter_quickack_mode((struct sock *)tp);
break;
case INET_ECN_CE:
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
/* fall through */
default:
tp->ecn_flags |= TCP_ECN_SEEN;
}
}
static inline void TCP_ECN_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
static inline void TCP_ECN_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th)
{
if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr))
tp->ecn_flags &= ~TCP_ECN_OK;
}
static inline int TCP_ECN_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th)
{
if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK))
return 1;
return 0;
}
/* Buffer size and advertised window tuning.
*
* 1. Tuning sk->sk_sndbuf, when connection enters established state.
*/
static void tcp_fixup_sndbuf(struct sock *sk)
{
int sndmem = SKB_TRUESIZE(tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER);
sndmem *= TCP_INIT_CWND;
if (sk->sk_sndbuf < sndmem)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
}
/* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
*
* All tcp_full_space() is split to two parts: "network" buffer, allocated
* forward and advertised in receiver window (tp->rcv_wnd) and
* "application buffer", required to isolate scheduling/application
* latencies from network.
* window_clamp is maximal advertised window. It can be less than
* tcp_full_space(), in this case tcp_full_space() - window_clamp
* is reserved for "application" buffer. The less window_clamp is
* the smoother our behaviour from viewpoint of network, but the lower
* throughput and the higher sensitivity of the connection to losses. 8)
*
* rcv_ssthresh is more strict window_clamp used at "slow start"
* phase to predict further behaviour of this connection.
* It is used for two goals:
* - to enforce header prediction at sender, even when application
* requires some significant "application buffer". It is check #1.
* - to prevent pruning of receive queue because of misprediction
* of receiver window. Check #2.
*
* The scheme does not work when sender sends good segments opening
* window and then starts to feed us spaghetti. But it should work
* in common situations. Otherwise, we have to rely on queue collapsing.
*/
/* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Optimize this! */
int truesize = tcp_win_from_space(skb->truesize) >> 1;
int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1;
while (tp->rcv_ssthresh <= window) {
if (truesize <= skb->len)
return 2 * inet_csk(sk)->icsk_ack.rcv_mss;
truesize >>= 1;
window >>= 1;
}
return 0;
}
static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Check #1 */
if (tp->rcv_ssthresh < tp->window_clamp &&
(int)tp->rcv_ssthresh < tcp_space(sk) &&
!tcp_memory_pressure) {
int incr;
/* Check #2. Increase window, if skb with such overhead
* will fit to rcvbuf in future.
*/
if (tcp_win_from_space(skb->truesize) <= skb->len)
incr = 2 * tp->advmss;
else
incr = __tcp_grow_window(sk, skb);
if (incr) {
tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
tp->window_clamp);
inet_csk(sk)->icsk_ack.quick |= 1;
}
}
}
/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk)
{
u32 mss = tcp_sk(sk)->advmss;
u32 icwnd = TCP_DEFAULT_INIT_RCVWND;
int rcvmem;
/* Limit to 10 segments if mss <= 1460,
* or 14600/mss segments, with a minimum of two segments.
*/
if (mss > 1460)
icwnd = max_t(u32, (1460 * TCP_DEFAULT_INIT_RCVWND) / mss, 2);
rcvmem = SKB_TRUESIZE(mss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < mss)
rcvmem += 128;
rcvmem *= icwnd;
if (sk->sk_rcvbuf < rcvmem)
sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]);
}
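/* Worked example (hypothetical) for the clamp above: with mss = 8960,
 * icwnd = max_t(u32, (1460 * 10) / 8960, 2) = max(1, 2) = 2 segments,
 * while any mss <= 1460 keeps the TCP_DEFAULT_INIT_RCVWND default of
 * 10 segments before the truesize-based rcvmem sizing is applied.
 */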
/* 4. Try to fixup all. It is made immediately after connection enters
* established state.
*/
static void tcp_init_buffer_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int maxwin;
if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK))
tcp_fixup_rcvbuf(sk);
if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK))
tcp_fixup_sndbuf(sk);
tp->rcvq_space.space = tp->rcv_wnd;
maxwin = tcp_full_space(sk);
if (tp->window_clamp >= maxwin) {
tp->window_clamp = maxwin;
if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss)
tp->window_clamp = max(maxwin -
(maxwin >> sysctl_tcp_app_win),
4 * tp->advmss);
}
/* Force reservation of one segment. */
if (sysctl_tcp_app_win &&
tp->window_clamp > 2 * tp->advmss &&
tp->window_clamp + tp->advmss > maxwin)
tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss);
tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ack.quick = 0;
if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
!tcp_memory_pressure &&
atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
sysctl_tcp_rmem[2]);
}
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);
}
/* Initialize RCV_MSS value.
* RCV_MSS is our guess about the MSS used by the peer.
* We have no direct information about the MSS.
* It's better to underestimate the RCV_MSS rather than overestimate.
* Overestimations make us ACKing less frequently than needed.
* Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
*/
void tcp_initialize_rcv_mss(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache);
hint = min(hint, tp->rcv_wnd / 2);
hint = min(hint, TCP_MSS_DEFAULT);
hint = max(hint, TCP_MIN_MSS);
inet_csk(sk)->icsk_ack.rcv_mss = hint;
}
EXPORT_SYMBOL(tcp_initialize_rcv_mss);
/* Receiver "autotuning" code.
*
* The algorithm for RTT estimation w/o timestamps is based on
* Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
* <http://public.lanl.gov/radiant/pubs.html#DRS>
*
* More detail on this code can be found at
* <http://staff.psc.edu/jheffner/>,
* though this reference is out of date. A new paper
* is pending.
*/
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
{
u32 new_sample = tp->rcv_rtt_est.rtt;
long m = sample;
if (m == 0)
m = 1;
if (new_sample != 0) {
/* If we sample in larger samples in the non-timestamp
* case, we could grossly overestimate the RTT especially
* with chatty applications or bulk transfer apps which
* are stalled on filesystem I/O.
*
* Also, since we are only going for a minimum in the
* non-timestamp case, we do not smooth things out
* else with timestamps disabled convergence takes too
* long.
*/
if (!win_dep) {
m -= (new_sample >> 3);
new_sample += m;
} else if (m < new_sample)
new_sample = m << 3;
} else {
/* No previous measure. */
new_sample = m << 3;
}
if (tp->rcv_rtt_est.rtt != new_sample)
tp->rcv_rtt_est.rtt = new_sample;
}
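/* Sketch (hypothetical, compiled out): rcv_rtt_est.rtt is kept scaled by 8,
 * so the !win_dep branch above is the usual EWMA
 * rtt <- 7/8 * rtt + 1/8 * sample, written in fixed point.
 */
#if 0
static u32 rcv_rtt_ewma_example(u32 scaled_rtt, long sample)
{
	/* scaled_rtt holds 8 * rtt; returns the updated scaled value */
	return scaled_rtt + (sample - (long)(scaled_rtt >> 3));
}
#endif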
static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp)
{
if (tp->rcv_rtt_est.time == 0)
goto new_measure;
if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq))
return;
tcp_rcv_rtt_update(tp, jiffies - tp->rcv_rtt_est.time, 1);
new_measure:
tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;
tp->rcv_rtt_est.time = tcp_time_stamp;
}
static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->rx_opt.rcv_tsecr &&
(TCP_SKB_CB(skb)->end_seq -
TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss))
tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0);
}
/*
* This function should be called every time data is copied to user space.
* It calculates the appropriate TCP receive buffer space.
*/
void tcp_rcv_space_adjust(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int time;
int space;
if (tp->rcvq_space.time == 0)
goto new_measure;
time = tcp_time_stamp - tp->rcvq_space.time;
if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0)
return;
space = 2 * (tp->copied_seq - tp->rcvq_space.seq);
space = max(tp->rcvq_space.space, space);
if (tp->rcvq_space.space != space) {
int rcvmem;
tp->rcvq_space.space = space;
if (sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int new_clamp = space;
/* Receive space grows, normalize in order to
* take into account packet headers and sk_buff
* structure overhead.
*/
space /= tp->advmss;
if (!space)
space = 1;
rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER);
while (tcp_win_from_space(rcvmem) < tp->advmss)
rcvmem += 128;
space *= rcvmem;
space = min(space, sysctl_tcp_rmem[2]);
if (space > sk->sk_rcvbuf) {
sk->sk_rcvbuf = space;
/* Make the window clamp follow along. */
tp->window_clamp = new_clamp;
}
}
}
new_measure:
tp->rcvq_space.seq = tp->copied_seq;
tp->rcvq_space.time = tcp_time_stamp;
}
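/* Worked example (hypothetical): if the application copied 64 KB during the
 * last measured RTT, rcvq_space.space grows toward 2 * 64 KB; the code above
 * then converts that byte count into advmss-sized skbs, charges each at its
 * truesize, and clamps the result to sysctl_tcp_rmem[2] before raising
 * sk_rcvbuf and window_clamp.
 */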
/* There is something which you must keep in mind when you analyze the
* behavior of the tp->ato delayed ack timeout interval. When a
* connection starts up, we want to ack as quickly as possible. The
* problem is that "good" TCP's do slow start at the beginning of data
* transmission. This means that until we send the first few ACK's the
* sender will sit on his end and only queue most of his data, because
* he can only send snd_cwnd unacked packets at any given time. For
* each ACK we send, he increments snd_cwnd and transmits more of his
* queue. -DaveM
*/
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
u32 now;
inet_csk_schedule_ack(sk);
tcp_measure_rcv_mss(sk, skb);
tcp_rcv_rtt_measure(tp);
now = tcp_time_stamp;
if (!icsk->icsk_ack.ato) {
/* The _first_ data packet received, initialize
* delayed ACK engine.
*/
tcp_incr_quickack(sk);
icsk->icsk_ack.ato = TCP_ATO_MIN;
} else {
int m = now - icsk->icsk_ack.lrcvtime;
if (m <= TCP_ATO_MIN / 2) {
/* The fastest case is the first. */
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;
} else if (m < icsk->icsk_ack.ato) {
icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;
if (icsk->icsk_ack.ato > icsk->icsk_rto)
icsk->icsk_ack.ato = icsk->icsk_rto;
} else if (m > icsk->icsk_rto) {
/* Too long gap. Apparently sender failed to
* restart window, so that we send ACKs quickly.
*/
tcp_incr_quickack(sk);
sk_mem_reclaim(sk);
}
}
icsk->icsk_ack.lrcvtime = now;
TCP_ECN_check_ce(tp, skb);
if (skb->len >= 128)
tcp_grow_window(sk, skb);
}
/* Called to compute a smoothed rtt estimate. The data fed to this
* routine either comes from timestamps, or from segments that were
* known _not_ to have been retransmitted [see Karn/Partridge
* Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
* piece by Van Jacobson.
* NOTE: the next three routines used to be one big routine.
* To save cycles in the RFC 1323 implementation it was better to break
* it up into three procedures. -- erics
*/
static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
struct tcp_sock *tp = tcp_sk(sk);
long m = mrtt; /* RTT */
/* The following amusing code comes from Jacobson's
* article in SIGCOMM '88. Note that rtt and mdev
* are scaled versions of rtt and mean deviation.
* This is designed to be as fast as possible
* m stands for "measurement".
*
* On a 1990 paper the rto value is changed to:
* RTO = rtt + 4 * mdev
*
* Funny. This algorithm seems to be very broken.
* These formulae increase RTO, when it should be decreased, increase
* too slowly, when it should be increased quickly, decrease too quickly
* etc. I guess in BSD RTO takes ONE value, so that it absolutely
* does not matter how to _calculate_ it. Seems, it was a trap
* that VJ failed to avoid. 8)
*/
if (m == 0)
m = 1;
if (tp->srtt != 0) {
m -= (tp->srtt >> 3); /* m is now error in rtt est */
tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */
if (m < 0) {
m = -m; /* m is now abs(error) */
m -= (tp->mdev >> 2); /* similar update on mdev */
/* This is similar to one of Eifel findings.
* Eifel blocks mdev updates when rtt decreases.
* This solution is a bit different: we use finer gain
* for mdev in this case (alpha*beta).
* Like Eifel it also prevents growth of rto,
* but also it limits too fast rto decreases,
* happening in pure Eifel.
*/
if (m > 0)
m >>= 3;
} else {
m -= (tp->mdev >> 2); /* similar update on mdev */
}
tp->mdev += m; /* mdev = 3/4 mdev + 1/4 new */
if (tp->mdev > tp->mdev_max) {
tp->mdev_max = tp->mdev;
if (tp->mdev_max > tp->rttvar)
tp->rttvar = tp->mdev_max;
}
if (after(tp->snd_una, tp->rtt_seq)) {
if (tp->mdev_max < tp->rttvar)
tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
tp->rtt_seq = tp->snd_nxt;
tp->mdev_max = tcp_rto_min(sk);
}
} else {
/* no previous measure. */
tp->srtt = m << 3; /* take the measured time to be rtt */
tp->mdev = m << 1; /* make sure rto = 3*rtt */
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
}
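/* Worked example (hypothetical): srtt is stored <<3 and mdev <<2, so after
 * the very first sample m the state is srtt = 8m, mdev = 2m, and the RTO
 * computed from it is (srtt >> 3) + rttvar = m + 2m = 3m (ignoring the
 * tcp_rto_min() clamp) -- matching the "make sure rto = 3*rtt" note above.
 * Once converged, this approximates the classic rtt + 4 * mean-deviation
 * rule.
 */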
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
static inline void tcp_set_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Old crap is replaced with new one. 8)
*
* More seriously:
* 1. If rtt variance happened to be less than 50msec, it is a hallucination.
* It cannot be less due to utterly erratic ACK generation made
* at least by solaris and freebsd. "Erratic ACKs" has _nothing_
* to do with delayed acks, because at cwnd>2 true delack timeout
* is invisible. Actually, Linux-2.4 also generates erratic
* ACKs in some circumstances.
*/
inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);
/* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them,
* all the algo is pure shit and should be replaced
* with a correct one. That is exactly what we pretend to do.
*/
/* NOTE: clamping at TCP_RTO_MIN is not required, current algo
* guarantees that rto is higher.
*/
tcp_bound_rto(sk);
}
/* Save metrics learned by this TCP session.
This function is called only, when TCP finishes successfully
i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
*/
void tcp_update_metrics(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
if (sysctl_tcp_nometrics_save)
return;
dst_confirm(dst);
if (dst && (dst->flags & DST_HOST)) {
const struct inet_connection_sock *icsk = inet_csk(sk);
int m;
unsigned long rtt;
if (icsk->icsk_backoff || !tp->srtt) {
/* This session failed to estimate rtt. Why?
* Probably, no packets returned in time.
* Reset our results.
*/
if (!(dst_metric_locked(dst, RTAX_RTT)))
dst_metric_set(dst, RTAX_RTT, 0);
return;
}
rtt = dst_metric_rtt(dst, RTAX_RTT);
m = rtt - tp->srtt;
/* If newly calculated rtt larger than stored one,
* store new one. Otherwise, use EWMA. Remember,
* rtt overestimation is always better than underestimation.
*/
if (!(dst_metric_locked(dst, RTAX_RTT))) {
if (m <= 0)
set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
else
set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
}
if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
unsigned long var;
if (m < 0)
m = -m;
/* Scale deviation to rttvar fixed point */
m >>= 1;
if (m < tp->mdev)
m = tp->mdev;
var = dst_metric_rtt(dst, RTAX_RTTVAR);
if (m >= var)
var = m;
else
var -= (var - m) >> 2;
set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
}
if (tcp_in_initial_slowstart(tp)) {
/* Slow start still did not finish. */
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
(tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
if (!dst_metric_locked(dst, RTAX_CWND) &&
tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
} else if (tp->snd_cwnd > tp->snd_ssthresh &&
icsk->icsk_ca_state == TCP_CA_Open) {
/* Cong. avoidance phase, cwnd is reliable. */
if (!dst_metric_locked(dst, RTAX_SSTHRESH))
dst_metric_set(dst, RTAX_SSTHRESH,
max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
if (!dst_metric_locked(dst, RTAX_CWND))
dst_metric_set(dst, RTAX_CWND,
(dst_metric(dst, RTAX_CWND) +
tp->snd_cwnd) >> 1);
} else {
/* Else slow start did not finish, cwnd is non-sense,
ssthresh may be also invalid.
*/
if (!dst_metric_locked(dst, RTAX_CWND))
dst_metric_set(dst, RTAX_CWND,
(dst_metric(dst, RTAX_CWND) +
tp->snd_ssthresh) >> 1);
if (dst_metric(dst, RTAX_SSTHRESH) &&
!dst_metric_locked(dst, RTAX_SSTHRESH) &&
tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
}
if (!dst_metric_locked(dst, RTAX_REORDERING)) {
if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
tp->reordering != sysctl_tcp_reordering)
dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
}
}
}
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
{
__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
if (!cwnd)
cwnd = TCP_INIT_CWND;
return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
}
/* Set slow start threshold and cwnd not falling to slow start */
void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
tp->prior_ssthresh = 0;
tp->bytes_acked = 0;
if (icsk->icsk_ca_state < TCP_CA_CWR) {
tp->undo_marker = 0;
if (set_ssthresh)
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp) + 1U);
tp->snd_cwnd_cnt = 0;
tp->high_seq = tp->snd_nxt;
tp->snd_cwnd_stamp = tcp_time_stamp;
TCP_ECN_queue_cwr(tp);
tcp_set_ca_state(sk, TCP_CA_CWR);
}
}
/*
* Packet counting of FACK is based on in-order assumptions, therefore TCP
* disables it when reordering is detected
*/
static void tcp_disable_fack(struct tcp_sock *tp)
{
/* RFC3517 uses different metric in lost marker => reset on change */
if (tcp_is_fack(tp))
tp->lost_skb_hint = NULL;
tp->rx_opt.sack_ok &= ~2;
}
/* Take a notice that peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp)
{
tp->rx_opt.sack_ok |= 4;
}
/* Initialize metrics on socket. */
static void tcp_init_metrics(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct dst_entry *dst = __sk_dst_get(sk);
if (dst == NULL)
goto reset;
dst_confirm(dst);
if (dst_metric_locked(dst, RTAX_CWND))
tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
if (dst_metric(dst, RTAX_SSTHRESH)) {
tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
tp->snd_ssthresh = tp->snd_cwnd_clamp;
} else {
/* ssthresh may have been reduced unnecessarily during
* 3WHS. Restore it back to its initial default.
*/
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
}
if (dst_metric(dst, RTAX_REORDERING) &&
tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
tcp_disable_fack(tp);
tp->reordering = dst_metric(dst, RTAX_REORDERING);
}
if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
goto reset;
/* Initial rtt is determined from SYN,SYN-ACK.
* The segment is small and rtt may appear much
* less than real one. Use per-dst memory
* to make it more realistic.
*
* A bit of theory. RTT is time passed after "normal" sized packet
* is sent until it is ACKed. In normal circumstances sending small
* packets forces the peer to delay ACKs and the calculation is correct too.
* The algorithm is adaptive and, provided we follow specs, it
* NEVER underestimate RTT. BUT! If peer tries to make some clever
* tricks sort of "quick acks" for time long enough to decrease RTT
* to a low value, and then abruptly stops doing it and starts to delay
* ACKs, wait for troubles.
*/
if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
tp->rtt_seq = tp->snd_nxt;
}
if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
}
tcp_set_rto(sk);
reset:
if (tp->srtt == 0) {
/* RFC2988bis: We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious one. Reset the RTO back to 3secs
* from the more aggressive 1sec to avoid more spurious
* retransmission.
*/
tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
}
/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
* retransmitted. In light of RFC2988bis' more aggressive 1sec
* initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
* retransmission has occurred.
*/
if (tp->total_retrans > 1)
tp->snd_cwnd = 1;
else
tp->snd_cwnd = tcp_init_cwnd(tp, dst);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
static void tcp_update_reordering(struct sock *sk, const int metric,
const int ts)
{
struct tcp_sock *tp = tcp_sk(sk);
if (metric > tp->reordering) {
int mib_idx;
tp->reordering = min(TCP_MAX_REORDERING, metric);
/* This exciting event is worth to be remembered. 8) */
if (ts)
mib_idx = LINUX_MIB_TCPTSREORDER;
else if (tcp_is_reno(tp))
mib_idx = LINUX_MIB_TCPRENOREORDER;
else if (tcp_is_fack(tp))
mib_idx = LINUX_MIB_TCPFACKREORDER;
else
mib_idx = LINUX_MIB_TCPSACKREORDER;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
#if FASTRETRANS_DEBUG > 1
printk(KERN_DEBUG "Disorder%d %d %u f%u s%u rr%d\n",
tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
tp->reordering,
tp->fackets_out,
tp->sacked_out,
tp->undo_marker ? tp->undo_retrans : 0);
#endif
tcp_disable_fack(tp);
}
}
/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
{
if ((tp->retransmit_skb_hint == NULL) ||
before(TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
tp->retransmit_skb_hint = skb;
if (!tp->lost_out ||
after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb)
{
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tcp_verify_retransmit_hint(tp, skb);
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
struct sk_buff *skb)
{
tcp_verify_retransmit_hint(tp, skb);
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
}
}
/* This procedure tags the retransmission queue when SACKs arrive.
*
* We have three tag bits: SACKED(S), RETRANS(R) and LOST(L).
* Packets in queue with these bits set are counted in variables
* sacked_out, retrans_out and lost_out, correspondingly.
*
* Valid combinations are:
* Tag InFlight Description
* 0 1 - orig segment is in flight.
* S 0 - nothing flies, orig reached receiver.
* L 0 - nothing flies, orig lost by net.
* R 2 - both orig and retransmit are in flight.
* L|R 1 - orig is lost, retransmit is in flight.
* S|R 1 - orig reached receiver, retrans is still in flight.
* (L|S|R is logically valid, it could occur when L|R is sacked,
* but it is equivalent to plain S and code short-circuits it to S.
* L|S is logically invalid, it would mean -1 packet in flight 8))
*
* These 6 states form finite state machine, controlled by the following events:
* 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
* 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
* 3. Loss detection event of one of three flavors:
* A. Scoreboard estimator decided the packet is lost.
* A'. Reno "three dupacks" marks head of queue lost.
* A''. Its FACK modification, head until snd.fack is lost.
* B. SACK arrives sacking data transmitted after never retransmitted
* hole was sent out.
* C. SACK arrives sacking SND.NXT at the moment, when the
* segment was retransmitted.
* 4. D-SACK added new rule: D-SACK changes any tag to S.
*
* It is pleasant to note, that state diagram turns out to be commutative,
* so that we are allowed not to be bothered by order of our actions,
* when multiple events arrive simultaneously. (see the function below).
*
* Reordering detection.
* --------------------
* Reordering metric is maximal distance, which a packet can be displaced
* in packet stream. With SACKs we can estimate it:
*
* 1. SACK fills old hole and the corresponding segment was not
* ever retransmitted -> reordering. Alas, we cannot use it
* when segment was retransmitted.
* 2. The last flaw is solved with D-SACK. D-SACK arrives
* for retransmitted and already SACKed segment -> reordering..
* Both of these heuristics are not used in Loss state, when we cannot
* account for retransmits accurately.
*
* SACK block validation.
* ----------------------
*
* SACK block range validation checks that the received SACK block fits to
* the expected sequence limits, i.e., it is between SND.UNA and SND.NXT.
* Note that SND.UNA is not included in the range though being valid because
* it means that the receiver is rather inconsistent with itself, reporting
* SACK reneging when it should advance SND.UNA. Such a SACK block is
* perfectly valid, however, in light of RFC2018 which explicitly states
* that "SACK block MUST reflect the newest segment. Even if the newest
* segment is going to be discarded ...", not that it looks very clever
* in case of head skb. Due to potential receiver driven attacks, we
* choose to avoid immediate execution of a walk in write queue due to
* reneging and defer head skb's loss recovery to standard loss recovery
* procedure that will eventually trigger (nothing forbids us doing this).
*
* Implements also blockage to start_seq wrap-around. Problem lies in the
* fact that though start_seq (s) is before end_seq (i.e., not reversed),
* there's no guarantee that it will be before snd_nxt (n). The problem
* happens when start_seq resides between end_seq wrap (e_w) and snd_nxt
* wrap (s_w):
*
* <- outs wnd -> <- wrapzone ->
* u e n u_w e_w s n_w
* | | | | | | |
* |<------------+------+----- TCP seqno space --------------+---------->|
* ...-- <2^31 ->| |<--------...
* ...---- >2^31 ------>| |<--------...
*
* Current code wouldn't be vulnerable but it's better still to discard such
* crazy SACK blocks. Doing this check for start_seq alone closes somewhat
* similar case (end_seq after snd_nxt wrap) as earlier reversed check in
* snd_nxt wrap -> snd_una region will then become "well defined", i.e.,
* equal to the ideal case (infinite seqno space without wrap caused issues).
*
* With D-SACK the lower bound is extended to cover sequence space below
* SND.UNA down to undo_marker, which is the last point of interest. Yet
* again, a D-SACK block must not go across snd_una (for the same reason as
* for the normal SACK blocks, explained above). But there all simplicity
* ends, TCP might receive valid D-SACKs below that. As long as they reside
* fully below undo_marker they do not affect behavior in anyway and can
* therefore be safely ignored. In rare cases (which are more or less
* theoretical ones), the D-SACK will nicely cross that boundary due to skb
* fragmentation and packet reordering past skb's retransmission. To consider
* them correctly, the acceptable range must be extended even more though
* the exact amount is rather hard to quantify. However, tp->max_window can
* be used as an exaggerated estimate.
*/
static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
u32 start_seq, u32 end_seq)
{
/* Too far in future, or reversed (interpretation is ambiguous) */
if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq))
return 0;
/* Nasty start_seq wrap-around check (see comments above) */
if (!before(start_seq, tp->snd_nxt))
return 0;
/* In outstanding window? ...This is valid exit for D-SACKs too.
* start_seq == snd_una is non-sensical (see comments above)
*/
if (after(start_seq, tp->snd_una))
return 1;
if (!is_dsack || !tp->undo_marker)
return 0;
/* ...Then it's D-SACK, and must reside below snd_una completely */
if (after(end_seq, tp->snd_una))
return 0;
if (!before(start_seq, tp->undo_marker))
return 1;
/* Too old */
if (!after(end_seq, tp->undo_marker))
return 0;
/* Undo_marker boundary crossing (overestimates a lot). Known already:
* start_seq < undo_marker and end_seq >= undo_marker.
*/
return !before(start_seq, end_seq - tp->max_window);
}
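/* Sketch (hypothetical, compiled out): the before()/after() comparisons in
 * tcp_is_sackblock_valid() are modular 32-bit arithmetic, so a block near a
 * sequence-number wrap still compares sanely.
 */
#if 0
static void tcp_seqno_wrap_example(void)
{
	/* 0xfffffff0 precedes 0x00000010 modulo 2^32 */
	BUG_ON(!before(0xfffffff0U, 0x00000010U));
	BUG_ON(!after(0x00000010U, 0xfffffff0U));
}
#endif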
/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
* Event "C". Later note: FACK people cheated me again 8), we have to account
* for reordering! Ugly, but should help.
*
* Search retransmitted skbs from write_queue that were sent when snd_nxt was
* less than what is now known to be received by the other end (derived from
* highest SACK block). Also calculate the lowest snd_nxt among the remaining
* retransmitted skbs to avoid some costly processing per ACKs.
*/
static void tcp_mark_lost_retrans(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int cnt = 0;
u32 new_low_seq = tp->snd_nxt;
u32 received_upto = tcp_highest_sack_seq(tp);
if (!tcp_is_fack(tp) || !tp->retrans_out ||
!after(received_upto, tp->lost_retrans_low) ||
icsk->icsk_ca_state != TCP_CA_Recovery)
return;
tcp_for_write_queue(skb, sk) {
u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
if (skb == tcp_send_head(sk))
break;
if (cnt == tp->retrans_out)
break;
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
continue;
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
continue;
/* TODO: We would like to get rid of tcp_is_fack(tp) only
* constraint here (see above) but figuring out that at
* least tp->reordering SACK blocks reside between ack_seq
* and received_upto is not easy task to do cheaply with
* the available datastructures.
*
* Whether FACK should check here for tp->reordering segs
* in-between one could argue for either way (it would be
* rather simple to implement as we could count fack_count
* during the walk and do tp->fackets_out - fack_count).
*/
if (after(received_upto, ack_seq)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
tcp_skb_mark_lost_uncond_verify(tp, skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
} else {
if (before(ack_seq, new_low_seq))
new_low_seq = ack_seq;
cnt += tcp_skb_pcount(skb);
}
}
if (tp->retrans_out)
tp->lost_retrans_low = new_low_seq;
}
static int tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
struct tcp_sack_block_wire *sp, int num_sacks,
u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq);
u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq);
int dup_sack = 0;
if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
dup_sack = 1;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
} else if (num_sacks > 1) {
u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
if (!after(end_seq_0, end_seq_1) &&
!before(start_seq_0, start_seq_1)) {
dup_sack = 1;
tcp_dsack_seen(tp);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPDSACKOFORECV);
}
}
/* D-SACK for already forgotten data... Do dumb counting. */
if (dup_sack && tp->undo_marker && tp->undo_retrans &&
!after(end_seq_0, prior_snd_una) &&
after(end_seq_0, tp->undo_marker))
tp->undo_retrans--;
return dup_sack;
}
struct tcp_sacktag_state {
int reord;
int fack_count;
int flag;
};
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
* the incoming SACK may not exactly match but we can find smaller MSS
* aligned portion of it that matches. Therefore we might need to fragment
* which may fail and creates some hassle (caller must handle error case
* returns).
*
* FIXME: this could be merged to shift decision code
*/
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
u32 start_seq, u32 end_seq)
{
int in_sack, err;
unsigned int pkt_len;
unsigned int mss;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
if (tcp_skb_pcount(skb) > 1 && !in_sack &&
after(TCP_SKB_CB(skb)->end_seq, start_seq)) {
mss = tcp_skb_mss(skb);
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
if (!in_sack) {
pkt_len = start_seq - TCP_SKB_CB(skb)->seq;
if (pkt_len < mss)
pkt_len = mss;
} else {
pkt_len = end_seq - TCP_SKB_CB(skb)->seq;
if (pkt_len < mss)
return -EINVAL;
}
/* Round if necessary so that SACKs cover only full MSSes
* and/or the remaining small portion (if present)
*/
if (pkt_len > mss) {
unsigned int new_len = (pkt_len / mss) * mss;
if (!in_sack && new_len < pkt_len) {
new_len += mss;
if (new_len > skb->len)
return 0;
}
pkt_len = new_len;
}
err = tcp_fragment(sk, skb, pkt_len, mss);
if (err < 0)
return err;
}
return in_sack;
}
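/* Worked example (hypothetical): with mss = 1448 and a SACK covering
 * pkt_len = 3000 bytes of a GSO skb, the rounding above yields
 * new_len = (3000 / 1448) * 1448 = 2896, so tcp_fragment() splits on an MSS
 * boundary; in the !in_sack case one extra mss is added first so the split
 * lands on an MSS boundary at or beyond the SACKed start.
 */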
static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
struct tcp_sacktag_state *state,
int dup_sack, int pcount)
{
struct tcp_sock *tp = tcp_sk(sk);
u8 sacked = TCP_SKB_CB(skb)->sacked;
int fack_count = state->fack_count;
/* Account D-SACK for retransmitted packet. */
if (dup_sack && (sacked & TCPCB_RETRANS)) {
if (tp->undo_marker && tp->undo_retrans &&
after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
tp->undo_retrans--;
if (sacked & TCPCB_SACKED_ACKED)
state->reord = min(fack_count, state->reord);
}
/* Nothing to do; acked frame is about to be dropped (was ACKed). */
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
if (sacked & TCPCB_SACKED_RETRANS) {
/* If the segment is not tagged as lost,
* we do not clear RETRANS, believing
* that retransmission is still in flight.
*/
if (sacked & TCPCB_LOST) {
sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
tp->lost_out -= pcount;
tp->retrans_out -= pcount;
}
} else {
if (!(sacked & TCPCB_RETRANS)) {
/* New sack for not retransmitted frame,
* which was in hole. It is reordering.
*/
if (before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
state->reord = min(fack_count,
state->reord);
/* SACK enhanced F-RTO (RFC4138; Appendix B) */
if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
state->flag |= FLAG_ONLY_ORIG_SACKED;
}
if (sacked & TCPCB_LOST) {
sacked &= ~TCPCB_LOST;
tp->lost_out -= pcount;
}
}
sacked |= TCPCB_SACKED_ACKED;
state->flag |= FLAG_DATA_SACKED;
tp->sacked_out += pcount;
fack_count += pcount;
/* Lost marker hint past SACKed? Tweak RFC3517 cnt */
if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
before(TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(tp->lost_skb_hint)->seq))
tp->lost_cnt_hint += pcount;
if (fack_count > tp->fackets_out)
tp->fackets_out = fack_count;
}
/* D-SACK. We can detect redundant retransmission in S|R and plain R
* frames and clear it. undo_retrans is decreased above, L|R frames
* are accounted above as well.
*/
if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) {
sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= pcount;
}
return sacked;
}
static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
unsigned int pcount, int shifted, int mss,
int dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
BUG_ON(!pcount);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
TCP_SKB_CB(prev)->end_seq += shifted;
TCP_SKB_CB(skb)->seq += shifted;
skb_shinfo(prev)->gso_segs += pcount;
BUG_ON(skb_shinfo(skb)->gso_segs < pcount);
skb_shinfo(skb)->gso_segs -= pcount;
/* When we're adding to gso_segs == 1, gso_size will be zero,
* in theory this shouldn't be necessary but as long as DSACK
* code can come after this skb later on it's better to keep
* setting gso_size to something.
*/
if (!skb_shinfo(prev)->gso_size) {
skb_shinfo(prev)->gso_size = mss;
skb_shinfo(prev)->gso_type = sk->sk_gso_type;
}
/* CHECKME: To clear or not to clear? Mimics normal skb currently */
if (skb_shinfo(skb)->gso_segs <= 1) {
skb_shinfo(skb)->gso_size = 0;
skb_shinfo(skb)->gso_type = 0;
}
/* We discard results */
tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
/* Difference in this won't matter, both ACKed by the same cumul. ACK */
TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
if (skb->len > 0) {
BUG_ON(!tcp_skb_pcount(skb));
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
return 0;
}
/* Whole SKB was eaten :-) */
if (skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = prev;
if (skb == tp->scoreboard_skb_hint)
tp->scoreboard_skb_hint = prev;
if (skb == tp->lost_skb_hint) {
tp->lost_skb_hint = prev;
tp->lost_cnt_hint -= tcp_skb_pcount(prev);
}
TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(prev)->tcp_flags;
if (skb == tcp_highest_sack(sk))
tcp_advance_highest_sack(sk, skb);
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
return 1;
}
/* I wish gso_size had a saner initialization than something-or-zero,
 * which complicates things.
 */
static int tcp_skb_seglen(const struct sk_buff *skb)
{
return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb);
}
/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb)
{
return !skb_headlen(skb) && skb_is_nonlinear(skb);
}
/* Try collapsing SACK blocks spanning across multiple skbs to a single
* skb.
*/
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
int dup_sack)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *prev;
int mss;
int pcount = 0;
int len;
int in_sack;
if (!sk_can_gso(sk))
goto fallback;
/* Normally R (SACKED_RETRANS) without L (LOST) won't result in plain S (SACKED_ACKED) */
if (!dup_sack &&
(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS)
goto fallback;
if (!skb_can_shift(skb))
goto fallback;
/* This frame is about to be dropped (was ACKed). */
if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
goto fallback;
/* Can only happen with delayed DSACK + discard craziness */
if (unlikely(skb == tcp_write_queue_head(sk)))
goto fallback;
prev = tcp_write_queue_prev(sk, skb);
if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED)
goto fallback;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
!before(end_seq, TCP_SKB_CB(skb)->end_seq);
if (in_sack) {
len = skb->len;
pcount = tcp_skb_pcount(skb);
mss = tcp_skb_seglen(skb);
/* TODO: Fix DSACKs not to fragment already-SACKed data; then we can
 * drop this restriction as unnecessary
 */
if (mss != tcp_skb_seglen(prev))
goto fallback;
} else {
if (!after(TCP_SKB_CB(skb)->end_seq, start_seq))
goto noop;
/* CHECKME: Is this the non-MSS split case only? Note that this
 * will cause skipped skbs due to the advancing loop; the original
 * code has that behaviour too.
 */
if (tcp_skb_pcount(skb) <= 1)
goto noop;
in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq);
if (!in_sack) {
/* TODO: head merge to next could be attempted here
 * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)),
 * though it might not be worth the additional hassle
 *
 * ...we can probably just fall back to what was done
 * previously. We could try merging non-SACKed ones
 * as well, but it probably isn't going to pay off
 * because later SACKs might again split them, and
 * it would make skb timestamp tracking a considerably
 * harder problem.
 */
goto fallback;
}
len = end_seq - TCP_SKB_CB(skb)->seq;
BUG_ON(len < 0);
BUG_ON(len > skb->len);
/* MSS boundaries should be honoured or else pcount will
 * severely break, even though it makes things a bit trickier.
 * Optimize the common case to avoid most of the divides.
 */
mss = tcp_skb_mss(skb);
/* TODO: Fix DSACKs not to fragment already-SACKed data; then we can
 * drop this restriction as unnecessary
 */
if (mss != tcp_skb_seglen(prev))
goto fallback;
if (len == mss) {
pcount = 1;
} else if (len < mss) {
goto noop;
} else {
pcount = len / mss;
len = pcount * mss;
}
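/* Illustrative example of the rounding above (hypothetical numbers,
 * not from any trace): with mss = 1448 and len = end_seq - seq = 3000,
 * the division gives pcount = 3000 / 1448 = 2 and len = 2 * 1448 = 2896,
 * so the shift stays on an MSS boundary and the remaining 104 bytes are
 * left in skb for a later pass.
 */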
}
if (!skb_shift(prev, skb, len))
goto fallback;
if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
goto out;
/* A filled hole allows collapsing with the next skb as well; this is
 * very useful when a hole-on-every-nth-skb pattern occurs.
 */
if (prev == tcp_write_queue_tail(sk))
goto out;
skb = tcp_write_queue_next(sk, prev);
if (!skb_can_shift(skb) ||
(skb == tcp_send_head(sk)) ||
((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) ||
(mss != tcp_skb_seglen(skb)))
goto out;
len = skb->len;
if (skb_shift(prev, skb, len)) {
pcount += tcp_skb_pcount(skb);
tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0);
}
out:
state->fack_count += pcount;
return prev;
noop:
return skb;
fallback:
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
return NULL;
}
static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
struct tcp_sack_block *next_dup,
struct tcp_sacktag_state *state,
u32 start_seq, u32 end_seq,
int dup_sack_in)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *tmp;
tcp_for_write_queue_from(skb, sk) {
int in_sack = 0;
int dup_sack = dup_sack_in;
if (skb == tcp_send_head(sk))
break;
/* queue is in-order => we can short-circuit the walk early */
if (!before(TCP_SKB_CB(skb)->seq, end_seq))
break;
if ((next_dup != NULL) &&
before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
in_sack = tcp_match_skb_to_sack(sk, skb,
next_dup->start_seq,
next_dup->end_seq);
if (in_sack > 0)
dup_sack = 1;
}
/* skb reference here is a bit tricky to get right, since
* shifting can eat and free both this skb and the next,
* so not even _safe variant of the loop is enough.
*/
if (in_sack <= 0) {
tmp = tcp_shift_skb_data(sk, skb, state,
start_seq, end_seq, dup_sack);
if (tmp != NULL) {
if (tmp != skb) {
skb = tmp;
continue;
}
in_sack = 0;
} else {
in_sack = tcp_match_skb_to_sack(sk, skb,
start_seq,
end_seq);
}
}
if (unlikely(in_sack < 0))
break;
if (in_sack) {
TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
state,
dup_sack,
tcp_skb_pcount(skb));
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
tcp_advance_highest_sack(sk, skb);
}
state->fack_count += tcp_skb_pcount(skb);
}
return skb;
}
/* Avoid all the extra work that sacktag does while walking in
 * the normal way.
 */
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
struct tcp_sacktag_state *state,
u32 skip_to_seq)
{
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
break;
state->fack_count += tcp_skb_pcount(skb);
}
return skb;
}
static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
struct sock *sk,
struct tcp_sack_block *next_dup,
struct tcp_sacktag_state *state,
u32 skip_to_seq)
{
if (next_dup == NULL)
return skb;
if (before(next_dup->start_seq, skip_to_seq)) {
skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq);
skb = tcp_sacktag_walk(skb, sk, NULL, state,
next_dup->start_seq, next_dup->end_seq,
1);
}
return skb;
}
static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache)
{
return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
}
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
u32 prior_snd_una)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
TCP_SKB_CB(ack_skb)->sacked);
struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
struct tcp_sack_block sp[TCP_NUM_SACKS];
struct tcp_sack_block *cache;
struct tcp_sacktag_state state;
struct sk_buff *skb;
int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3);
int used_sacks;
int found_dup_sack = 0;
int i, j;
int first_sack_index;
state.flag = 0;
state.reord = tp->packets_out;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
tp->fackets_out = 0;
tcp_highest_sack_reset(sk);
}
found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire,
num_sacks, prior_snd_una);
if (found_dup_sack)
state.flag |= FLAG_DSACKING_ACK;
/* Eliminate too old ACKs, but take into
 * account more or less fresh ones; they can
 * contain valid SACK info.
 */
if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
return 0;
if (!tp->packets_out)
goto out;
used_sacks = 0;
first_sack_index = 0;
for (i = 0; i < num_sacks; i++) {
int dup_sack = !i && found_dup_sack;
sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq);
sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq);
if (!tcp_is_sackblock_valid(tp, dup_sack,
sp[used_sacks].start_seq,
sp[used_sacks].end_seq)) {
int mib_idx;
if (dup_sack) {
if (!tp->undo_marker)
mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO;
else
mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD;
} else {
/* Don't count old SACK blocks caused by ACK reordering */
if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
!after(sp[used_sacks].end_seq, tp->snd_una))
continue;
mib_idx = LINUX_MIB_TCPSACKDISCARD;
}
NET_INC_STATS_BH(sock_net(sk), mib_idx);
if (i == 0)
first_sack_index = -1;
continue;
}
/* Ignore very old stuff early */
if (!after(sp[used_sacks].end_seq, prior_snd_una))
continue;
used_sacks++;
}
/* order SACK blocks to allow in order walk of the retrans queue */
for (i = used_sacks - 1; i > 0; i--) {
for (j = 0; j < i; j++) {
if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
swap(sp[j], sp[j + 1]);
/* Track where the first SACK block goes to */
if (j == first_sack_index)
first_sack_index = j + 1;
}
}
}
skb = tcp_write_queue_head(sk);
state.fack_count = 0;
i = 0;
if (!tp->sacked_out) {
/* It's already past, so skip checking against it */
cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
} else {
cache = tp->recv_sack_cache;
/* Skip empty blocks at the head of the cache */
while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
!cache->end_seq)
cache++;
}
while (i < used_sacks) {
u32 start_seq = sp[i].start_seq;
u32 end_seq = sp[i].end_seq;
int dup_sack = (found_dup_sack && (i == first_sack_index));
struct tcp_sack_block *next_dup = NULL;
if (found_dup_sack && ((i + 1) == first_sack_index))
next_dup = &sp[i + 1];
/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
state.flag |= FLAG_DATA_LOST;
/* Skip too early cached blocks */
while (tcp_sack_cache_ok(tp, cache) &&
!before(start_seq, cache->end_seq))
cache++;
/* Can we skip some work by looking at recv_sack_cache? */
if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
after(end_seq, cache->start_seq)) {
/* Head todo? */
if (before(start_seq, cache->start_seq)) {
skb = tcp_sacktag_skip(skb, sk, &state,
start_seq);
skb = tcp_sacktag_walk(skb, sk, next_dup,
&state,
start_seq,
cache->start_seq,
dup_sack);
}
/* Rest of the block already fully processed? */
if (!after(end_seq, cache->end_seq))
goto advance_sp;
skb = tcp_maybe_skipping_dsack(skb, sk, next_dup,
&state,
cache->end_seq);
/* ...tail remains todo... */
if (tcp_highest_sack_seq(tp) == cache->end_seq) {
/* ...but better entrypoint exists! */
skb = tcp_highest_sack(sk);
if (skb == NULL)
break;
state.fack_count = tp->fackets_out;
cache++;
goto walk;
}
skb = tcp_sacktag_skip(skb, sk, &state, cache->end_seq);
/* Check overlap against next cached too (past this one already) */
cache++;
continue;
}
if (!before(start_seq, tcp_highest_sack_seq(tp))) {
skb = tcp_highest_sack(sk);
if (skb == NULL)
break;
state.fack_count = tp->fackets_out;
}
skb = tcp_sacktag_skip(skb, sk, &state, start_seq);
walk:
skb = tcp_sacktag_walk(skb, sk, next_dup, &state,
start_seq, end_seq, dup_sack);
advance_sp:
/* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
* due to in-order walk
*/
if (after(end_seq, tp->frto_highmark))
state.flag &= ~FLAG_ONLY_ORIG_SACKED;
i++;
}
/* Clear the head of the cache sack blocks so we can skip it next time */
for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
tp->recv_sack_cache[i].start_seq = 0;
tp->recv_sack_cache[i].end_seq = 0;
}
for (j = 0; j < used_sacks; j++)
tp->recv_sack_cache[i++] = sp[j];
tcp_mark_lost_retrans(sk);
tcp_verify_left_out(tp);
if ((state.reord < tp->fackets_out) &&
((icsk->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker) &&
(!tp->frto_highmark || after(tp->snd_una, tp->frto_highmark)))
tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
out:
#if FASTRETRANS_DEBUG > 0
WARN_ON((int)tp->sacked_out < 0);
WARN_ON((int)tp->lost_out < 0);
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
return state.flag;
}
/* Limits sacked_out so that its sum with lost_out isn't ever larger than
 * packets_out. Returns zero if no sacked_out adjustment was necessary.
*/
static int tcp_limit_reno_sacked(struct tcp_sock *tp)
{
u32 holes;
holes = max(tp->lost_out, 1U);
holes = min(holes, tp->packets_out);
if ((tp->sacked_out + holes) > tp->packets_out) {
tp->sacked_out = tp->packets_out - holes;
return 1;
}
return 0;
}
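/* A worked instance of the clamping above (hypothetical numbers):
 * with packets_out = 10, lost_out = 3 and sacked_out = 9, we get
 * holes = 3 and sacked_out + holes = 12 > 10, so sacked_out is
 * clamped to 10 - 3 = 7 and the function returns 1.
 */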
/* If we receive more dupacks than expected when counting segments
 * under the assumption of no reordering, interpret this as reordering.
 * The only other possible reason is a bug in the receiver's TCP.
*/
static void tcp_check_reno_reordering(struct sock *sk, const int addend)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_limit_reno_sacked(tp))
tcp_update_reordering(sk, tp->packets_out + addend, 0);
}
/* Emulate SACKs for SACKless connection: account for a new dupack. */
static void tcp_add_reno_sack(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tp->sacked_out++;
tcp_check_reno_reordering(sk, 0);
tcp_verify_left_out(tp);
}
/* Account for ACK, ACKing some data in Reno Recovery phase. */
static void tcp_remove_reno_sacks(struct sock *sk, int acked)
{
struct tcp_sock *tp = tcp_sk(sk);
if (acked > 0) {
/* One ACK acked hole. The rest eat duplicate ACKs. */
if (acked - 1 >= tp->sacked_out)
tp->sacked_out = 0;
else
tp->sacked_out -= acked - 1;
}
tcp_check_reno_reordering(sk, acked);
tcp_verify_left_out(tp);
}
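/* E.g. (hypothetical numbers): if an ACK newly acks 3 segments while
 * sacked_out = 5, the first acked segment fills the hole and the other
 * two each consume one emulated SACK, leaving sacked_out = 3.
 */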
static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
{
tp->sacked_out = 0;
}
static int tcp_is_sackfrto(const struct tcp_sock *tp)
{
return (sysctl_tcp_frto == 0x2) && !tcp_is_reno(tp);
}
/* F-RTO can only be used if TCP has never retransmitted anything other than
 * the head (the SACK-enhanced variant from Appendix B of RFC4138 is more robust here)
*/
int tcp_use_frto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
if (!sysctl_tcp_frto)
return 0;
/* MTU probing and F-RTO won't really play along nicely currently */
if (icsk->icsk_mtup.probe_size)
return 0;
if (tcp_is_sackfrto(tp))
return 1;
/* Avoid expensive walking of rexmit queue if possible */
if (tp->retrans_out > 1)
return 0;
skb = tcp_write_queue_head(sk);
if (tcp_skb_is_last(sk, skb))
return 1;
skb = tcp_write_queue_next(sk, skb); /* Skips head */
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
return 0;
/* Short-circuit when first non-SACKed skb has been checked */
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
break;
}
return 1;
}
/* RTO occurred, but do not yet enter Loss state. Instead, defer RTO
* recovery a bit and use heuristics in tcp_process_frto() to detect if
* the RTO was spurious. Only clear SACKED_RETRANS of the head here to
* keep retrans_out counting accurate (with SACK F-RTO, other than head
* may still have that bit set); TCPCB_LOST and remaining SACKED_RETRANS
* bits are handled if the Loss state is really to be entered (in
* tcp_enter_frto_loss).
*
* Do like tcp_enter_loss() would; when RTO expires the second time it
* does:
* "Reduce ssthresh if it has not yet been made inside this window."
*/
void tcp_enter_frto(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if ((!tp->frto_counter && icsk->icsk_ca_state <= TCP_CA_Disorder) ||
tp->snd_una == tp->high_seq ||
((icsk->icsk_ca_state == TCP_CA_Loss || tp->frto_counter) &&
!icsk->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(sk);
/* Our state is too optimistic in the ssthresh() call because cwnd
 * is not reduced until tcp_enter_frto_loss() when a previous F-RTO
 * recovery has not yet completed. The pattern would be: RTO,
 * Cumulative ACK, RTO (2xRTO for the same segment does not end
 * up here twice).
 * RFC4138 should be more specific about what to do, even though
 * an RTO is quite unlikely to occur after the first Cumulative ACK
 * due to back-off and the complexity of the triggering events ...
 */
if (tp->frto_counter) {
u32 stored_cwnd;
stored_cwnd = tp->snd_cwnd;
tp->snd_cwnd = 2;
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tp->snd_cwnd = stored_cwnd;
} else {
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
}
/* ... in theory, the cong. control module could do "any tricks" in
 * ssthresh(), which means that ca_state, lost bits and the lost_out
 * counter would have to be faked before the call occurs. We
 * consider that too expensive, unlikely and hacky, so modules
 * using these in ssthresh() must deal with these incompatibility
 * issues if they receive CA_EVENT_FRTO and frto_counter != 0.
 */
tcp_ca_event(sk, CA_EVENT_FRTO);
}
tp->undo_marker = tp->snd_una;
tp->undo_retrans = 0;
skb = tcp_write_queue_head(sk);
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
}
tcp_verify_left_out(tp);
/* Too bad if TCP was application limited */
tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
/* Earlier loss recovery underway (see RFC4138; Appendix B).
* The last condition is necessary at least in tp->frto_counter case.
*/
if (tcp_is_sackfrto(tp) && (tp->frto_counter ||
((1 << icsk->icsk_ca_state) & (TCPF_CA_Recovery|TCPF_CA_Loss))) &&
after(tp->high_seq, tp->snd_una)) {
tp->frto_highmark = tp->high_seq;
} else {
tp->frto_highmark = tp->snd_nxt;
}
tcp_set_ca_state(sk, TCP_CA_Disorder);
tp->high_seq = tp->snd_nxt;
tp->frto_counter = 1;
}
/* Enter Loss state after F-RTO was applied. Dupack arrived after RTO,
* which indicates that we should follow the traditional RTO recovery,
* i.e. mark everything lost and do go-back-N retransmission.
*/
static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
tp->lost_out = 0;
tp->retrans_out = 0;
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
/*
 * Count the retransmission made on RTO correctly (only when
 * we were waiting for the first ACK and did not get it)...
 */
if ((tp->frto_counter == 1) && !(flag & FLAG_DATA_ACKED)) {
/* For some reason this R-bit might get cleared? */
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out += tcp_skb_pcount(skb);
/* ...enter this if branch just for the first segment */
flag |= FLAG_DATA_ACKED;
} else {
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
/* Marking forward transmissions that were made after the RTO as lost
 * can cause unnecessary retransmissions in some scenarios;
 * SACK blocks will mitigate that in some but not all cases.
 * We used not to mark them, but that was causing break-ups with
 * receivers that do only in-order reception.
 *
 * TODO: we could detect the presence of such a receiver and select
 * different behavior per flow.
 */
if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
}
tcp_verify_left_out(tp);
tp->snd_cwnd = tcp_packets_in_flight(tp) + allowed_segments;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->frto_counter = 0;
tp->bytes_acked = 0;
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
tcp_clear_all_retrans_hints(tp);
}
static void tcp_clear_retrans_partial(struct tcp_sock *tp)
{
tp->retrans_out = 0;
tp->lost_out = 0;
tp->undo_marker = 0;
tp->undo_retrans = 0;
}
void tcp_clear_retrans(struct tcp_sock *tp)
{
tcp_clear_retrans_partial(tp);
tp->fackets_out = 0;
tp->sacked_out = 0;
}
/* Enter Loss state. If "how" is not zero, forget all SACK information
* and reset tags completely, otherwise preserve SACKs. If receiver
* dropped its ofo queue, we will know this due to reneging detection.
*/
void tcp_enter_loss(struct sock *sk, int how)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
/* Reduce ssthresh if it has not yet been made inside this window. */
if (icsk->icsk_ca_state <= TCP_CA_Disorder || tp->snd_una == tp->high_seq ||
(icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
tcp_ca_event(sk, CA_EVENT_LOSS);
}
tp->snd_cwnd = 1;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->bytes_acked = 0;
tcp_clear_retrans_partial(tp);
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
if (!how) {
/* Push undo marker, if it was plain RTO and nothing
* was retransmitted. */
tp->undo_marker = tp->snd_una;
} else {
tp->sacked_out = 0;
tp->fackets_out = 0;
}
tcp_clear_all_retrans_hints(tp);
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
tp->undo_marker = 0;
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
}
tcp_verify_left_out(tp);
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
/* Abort F-RTO algorithm if one is in progress */
tp->frto_counter = 0;
}
/* If an ACK arrived pointing to a remembered SACK, it means that our
 * remembered SACKs do not reflect the real state of the receiver, i.e.
 * the receiver _host_ is heavily congested (or buggy).
 *
 * Do processing similar to RTO timeout.
 */
static int tcp_check_sack_reneging(struct sock *sk, int flag)
{
if (flag & FLAG_SACK_RENEGING) {
struct inet_connection_sock *icsk = inet_csk(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
tcp_enter_loss(sk, 1);
icsk->icsk_retransmits++;
tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
icsk->icsk_rto, TCP_RTO_MAX);
return 1;
}
return 0;
}
static inline int tcp_fackets_out(const struct tcp_sock *tp)
{
return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out;
}
/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK
 * counter when SACK is enabled (without SACK, sacked_out is used for
 * that purpose).
 *
 * Instead, with FACK TCP uses fackets_out, which includes both SACKed
 * segments up to the highest received SACK block so far and the holes
 * in between them.
 *
 * With reordering, holes may still be in flight, so RFC3517 recovery
 * uses pure sacked_out (the total number of SACKed segments) even though
 * it violates the RFC, which uses duplicate ACKs. Often these are equal,
 * but when e.g. out-of-window ACKs or packet duplication occur, they
 * differ. Since neither occurs due to loss, TCP should really
 * ignore them.
 */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
{
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}
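/* Illustrative example (hypothetical numbers): with 10 segments
 * outstanding and segments 6-8 SACKed, sacked_out = 3 while
 * fackets_out = 8 (everything up to the highest SACK). The FACK
 * heuristic thus reports 8 "dupacks", the RFC3517 one only 3 + 1 = 4.
 */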
static inline int tcp_skb_timedout(const struct sock *sk,
const struct sk_buff *skb)
{
return tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto;
}
static inline int tcp_head_timedout(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
return tp->packets_out &&
tcp_skb_timedout(sk, tcp_write_queue_head(sk));
}
/* Linux NewReno/SACK/FACK/ECN state machine.
* --------------------------------------
*
* "Open" Normal state, no dubious events, fast path.
 * "Disorder" In all respects it is "Open",
 * but requires a bit more attention. It is entered when
 * we see some SACKs or dupacks. It is split off from "Open"
 * mainly to move some processing from the fast path to the slow one.
* "CWR" CWND was reduced due to some Congestion Notification event.
* It can be ECN, ICMP source quench, local device congestion.
* "Recovery" CWND was reduced, we are fast-retransmitting.
* "Loss" CWND was reduced due to RTO timeout or SACK reneging.
*
* tcp_fastretrans_alert() is entered:
 * - on each incoming ACK, if the state is not "Open"
 * - when the arriving ACK is unusual, namely:
* * SACK
* * Duplicate ACK.
* * ECN ECE.
*
* Counting packets in flight is pretty simple.
*
* in_flight = packets_out - left_out + retrans_out
*
* packets_out is SND.NXT-SND.UNA counted in packets.
*
* retrans_out is number of retransmitted segments.
*
 * left_out is the number of segments that left the network but are not ACKed yet.
*
* left_out = sacked_out + lost_out
*
 * sacked_out: Packets which arrived at the receiver out of order
 * and hence are not cumulatively ACKed. With SACKs this number is simply
 * the amount of SACKed data. Even without SACKs
 * it is easy to give a pretty reliable estimate of this number
 * by counting duplicate ACKs.
*
 * lost_out: Packets lost by the network. TCP has no explicit
 * "loss notification" feedback from the network (for now).
 * This means that this number can only be _guessed_.
 * Actually, it is the heuristics used to predict lossage that
 * distinguish the different algorithms.
*
* F.e. after RTO, when all the queue is considered as lost,
* lost_out = packets_out and in_flight = retrans_out.
*
* Essentially, we have now two algorithms counting
* lost packets.
*
 * FACK: It is the simplest heuristic. As soon as we decide
 * that something is lost, we decide that _all_ not-SACKed
 * packets until the most forward SACK are lost. I.e.
 * lost_out = fackets_out - sacked_out and left_out = fackets_out.
 * It is an absolutely correct estimate, if the network does not reorder
 * packets. And it loses any connection to reality when reordering
 * takes place. We use FACK by default until reordering
 * is suspected on the path to this destination.
*
 * NewReno: when Recovery is entered, we assume that one segment
 * is lost (classic Reno). While we are in Recovery and
 * a partial ACK arrives, we assume that one more packet
 * is lost (NewReno). These heuristics are the same in NewReno
 * and SACK.
*
* Imagine, that's all! Forget about all this shamanism about CWND inflation
* deflation etc. CWND is real congestion window, never inflated, changes
* only according to classic VJ rules.
*
 * The really tricky (and carefully tuned) part of the algorithm
 * is hidden in the functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
 * The first determines the moment _when_ we should reduce CWND and,
 * hence, slow down forward transmission. In fact, it determines the moment
 * when we decide that a hole is caused by loss rather than by reordering.
*
* tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill
* holes, caused by lost packets.
*
 * And the most logically complicated part of the algorithm is the undo
 * heuristics. We detect false retransmits due to both too-early
 * fast retransmit (reordering) and underestimated RTO, by analyzing
 * timestamps and D-SACKs. When we detect that some segments were
 * retransmitted by mistake and the CWND reduction was wrong, we undo the
 * window reduction and abort the recovery phase. This logic is hidden
 * inside several functions named tcp_try_undo_<something>.
*/
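/* A worked instance of the accounting above (hypothetical numbers):
 * with packets_out = 20, sacked_out = 5, lost_out = 3 and
 * retrans_out = 2, left_out = 5 + 3 = 8 and
 * in_flight = 20 - 8 + 2 = 14 segments still presumed in the network.
 */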
/* This function decides when we should leave the Disorder state
 * and enter the Recovery phase, reducing the congestion window.
 *
 * Main question: may we continue forward transmission
 * with the same cwnd?
 */
static int tcp_time_to_recover(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 packets_out;
/* Do not perform any recovery during F-RTO algorithm */
if (tp->frto_counter)
return 0;
/* Trick#1: The loss is proven. */
if (tp->lost_out)
return 1;
/* Not-A-Trick#2 : Classic rule... */
if (tcp_dupack_heuristics(tp) > tp->reordering)
return 1;
/* Trick#3 : when we use RFC2988 timer restart, fast
* retransmit can be triggered by timeout of queue head.
*/
if (tcp_is_fack(tp) && tcp_head_timedout(sk))
return 1;
/* Trick#4: It is still not OK... But will it be useful to delay
* recovery more?
*/
packets_out = tp->packets_out;
if (packets_out <= tp->reordering &&
tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
!tcp_may_send_now(sk)) {
/* We have nothing to send. This connection is limited
* either by receiver window or by application.
*/
return 1;
}
/* If a thin stream is detected, retransmit after the first
 * received dupack. Employ only if SACK is supported, in order
 * to avoid a possible corner-case series of spurious retransmissions.
 * Use only if there is no unsent data.
 */
if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
tcp_is_sack(tp) && !tcp_send_head(sk))
return 1;
return 0;
}
/* New heuristic: it is possible only after we switched to restarting the
 * timer each time something is ACKed. Hence, we can detect timed-out packets
 * during fast retransmit without falling back to slow start.
 *
 * The usefulness of this as-is is very questionable, since we should know
 * which of the segments is the next to time out, which is relatively
 * expensive to find in the general case unless we add some data structure
 * just for that. The current approach certainly won't find the right one
 * too often, and when it finally does find _something_ it usually marks a
 * large part of the window right away (because a retransmission with a
 * larger timestamp blocks the loop from advancing). -ij
 */
static void tcp_timeout_skbs(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (!tcp_is_fack(tp) || !tcp_head_timedout(sk))
return;
skb = tp->scoreboard_skb_hint;
if (tp->scoreboard_skb_hint == NULL)
skb = tcp_write_queue_head(sk);
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (!tcp_skb_timedout(sk, skb))
break;
tcp_skb_mark_lost(tp, skb);
}
tp->scoreboard_skb_hint = skb;
tcp_verify_left_out(tp);
}
/* Mark the head of the queue as lost. With RFC3517 SACK, the marking
 * is done against the sacked "cnt"; otherwise it is against the facked "cnt".
 */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
int cnt, oldcnt;
int err;
unsigned int mss;
WARN_ON(packets > tp->packets_out);
if (tp->lost_skb_hint) {
skb = tp->lost_skb_hint;
cnt = tp->lost_cnt_hint;
/* Head already handled? */
if (mark_head && skb != tcp_write_queue_head(sk))
return;
} else {
skb = tcp_write_queue_head(sk);
cnt = 0;
}
tcp_for_write_queue_from(skb, sk) {
if (skb == tcp_send_head(sk))
break;
/* TODO: do this better */
/* this is not the most efficient way to do this... */
tp->lost_skb_hint = skb;
tp->lost_cnt_hint = cnt;
if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
break;
oldcnt = cnt;
if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
cnt += tcp_skb_pcount(skb);
if (cnt > packets) {
if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
(oldcnt >= packets))
break;
mss = skb_shinfo(skb)->gso_size;
err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
if (err < 0)
break;
cnt = packets;
}
tcp_skb_mark_lost(tp, skb);
if (mark_head)
break;
}
tcp_verify_left_out(tp);
}
/* Account newly detected lost packet(s) */
static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_is_reno(tp)) {
tcp_mark_head_lost(sk, 1, 1);
} else if (tcp_is_fack(tp)) {
int lost = tp->fackets_out - tp->reordering;
if (lost <= 0)
lost = 1;
tcp_mark_head_lost(sk, lost, 0);
} else {
int sacked_upto = tp->sacked_out - tp->reordering;
if (sacked_upto >= 0)
tcp_mark_head_lost(sk, sacked_upto, 0);
else if (fast_rexmit)
tcp_mark_head_lost(sk, 1, 1);
}
tcp_timeout_skbs(sk);
}
/* CWND moderation, preventing bursts due to too big ACKs
* in dubious situations.
*/
static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
{
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp) + tcp_max_burst(tp));
tp->snd_cwnd_stamp = tcp_time_stamp;
}
/* The lower bound on the congestion window is the slow start threshold,
 * unless the congestion avoidance choice decides to override it.
 */
static inline u32 tcp_cwnd_min(const struct sock *sk)
{
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
}
/* Decrease cwnd each second ack. */
static void tcp_cwnd_down(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int decr = tp->snd_cwnd_cnt + 1;
if ((flag & (FLAG_ANY_PROGRESS | FLAG_DSACKING_ACK)) ||
(tcp_is_reno(tp) && !(flag & FLAG_NOT_DUP))) {
tp->snd_cwnd_cnt = decr & 1;
decr >>= 1;
if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
tp->snd_cwnd -= decr;
tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp) + 1);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
}
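/* Trace of the decrement logic above (assuming qualifying ACKs only):
 * starting from snd_cwnd_cnt = 0, the first ACK sets snd_cwnd_cnt = 1
 * with decr = 0, the second yields decr = 1 and snd_cwnd_cnt = 0, so
 * cwnd shrinks by one segment on every second ACK, i.e. rate halving
 * over roughly one RTT.
 */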
/* Nothing was retransmitted, or the returned timestamp is less
 * than the timestamp of the first retransmission.
 */
static inline int tcp_packet_delayed(const struct tcp_sock *tp)
{
return !tp->retrans_stamp ||
(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}
/* Undo procedures. */
#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, const char *msg)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_family == AF_INET) {
printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
msg,
&inet->inet_daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (sk->sk_family == AF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
msg,
&np->daddr, ntohs(inet->inet_dport),
tp->snd_cwnd, tcp_left_out(tp),
tp->snd_ssthresh, tp->prior_ssthresh,
tp->packets_out);
}
#endif
}
#else
#define DBGUNDO(x...) do { } while (0)
#endif
static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->prior_ssthresh) {
const struct inet_connection_sock *icsk = inet_csk(sk);
if (icsk->icsk_ca_ops->undo_cwnd)
tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
else
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
TCP_ECN_withdraw_cwr(tp);
}
} else {
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
}
tp->snd_cwnd_stamp = tcp_time_stamp;
}
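/* Example of the restore above (hypothetical numbers): with
 * prior_ssthresh = 8, snd_ssthresh = 5, snd_cwnd = 6 and no
 * undo_cwnd hook, cwnd becomes max(6, 5 << 1) = 10; with
 * undo_ssthresh true, ssthresh is also restored to 8.
 */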
static inline int tcp_may_undo(const struct tcp_sock *tp)
{
return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}
/* People celebrate: "We love our President!" */
static int tcp_try_undo_recovery(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_may_undo(tp)) {
int mib_idx;
/* Happy end! We did not retransmit anything
* or our original transmission succeeded.
*/
DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
tcp_undo_cwr(sk, true);
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
mib_idx = LINUX_MIB_TCPLOSSUNDO;
else
mib_idx = LINUX_MIB_TCPFULLUNDO;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
tp->undo_marker = 0;
}
if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
/* Hold the old state until something *above* high_seq
 * is ACKed. For Reno it is a MUST to prevent false
 * fast retransmits (RFC2582). SACK TCP is safe. */
tcp_moderate_cwnd(tp);
return 1;
}
tcp_set_ca_state(sk, TCP_CA_Open);
return 0;
}
/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static void tcp_try_undo_dsack(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tp->undo_marker && !tp->undo_retrans) {
DBGUNDO(sk, "D-SACK");
tcp_undo_cwr(sk, true);
tp->undo_marker = 0;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
}
}
/* We can clear retrans_stamp when there are no retransmissions in the
 * window. It would seem that it is trivially available to us in
 * tp->retrans_out; however, that kind of assumption doesn't consider
 * what will happen if errors occur when sending a retransmission for the
 * second time. ...It could be that such a segment has only
 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
 * the head skb is enough, except for some reneging corner cases that
 * are not worth the effort.
 *
 * The main reason for all this complexity is that the connection dying
 * time now depends on the validity of retrans_stamp; in particular,
 * successive retransmissions of a segment must not advance
 * retrans_stamp under any conditions.
 */
static int tcp_any_retrans_done(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
if (tp->retrans_out)
return 1;
skb = tcp_write_queue_head(sk);
if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
return 1;
return 0;
}
/* Undo during fast recovery after partial ACK. */
static int tcp_try_undo_partial(struct sock *sk, int acked)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Partial ACK arrived. Force Hoe's retransmit. */
int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
if (tcp_may_undo(tp)) {
/* Plain luck! The hole is filled with a delayed
 * packet, rather than with a retransmit.
 */
if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
DBGUNDO(sk, "Hoe");
tcp_undo_cwr(sk, false);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
/* So... Do not make Hoe's retransmit yet.
 * If the first packet was delayed, the rest
 * are most probably delayed as well.
 */
failed = 0;
}
return failed;
}
/* Undo during loss recovery after partial ACK. */
static int tcp_try_undo_loss(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_may_undo(tp)) {
struct sk_buff *skb;
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
}
tcp_clear_all_retrans_hints(tp);
DBGUNDO(sk, "partial loss");
tp->lost_out = 0;
tcp_undo_cwr(sk, true);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
inet_csk(sk)->icsk_retransmits = 0;
tp->undo_marker = 0;
if (tcp_is_sack(tp))
tcp_set_ca_state(sk, TCP_CA_Open);
return 1;
}
return 0;
}
static inline void tcp_complete_cwr(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Do not moderate cwnd if it's already undone in cwr or recovery. */
if (tp->undo_marker) {
if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR)
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
else /* PRR */
tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_time_stamp;
}
tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
}
static void tcp_try_keep_open(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int state = TCP_CA_Open;
if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
state = TCP_CA_Disorder;
if (inet_csk(sk)->icsk_ca_state != state) {
tcp_set_ca_state(sk, state);
tp->high_seq = tp->snd_nxt;
}
}
static void tcp_try_to_open(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_verify_left_out(tp);
if (!tp->frto_counter && !tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
if (flag & FLAG_ECE)
tcp_enter_cwr(sk, 1);
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_down(sk, flag);
}
}
static void tcp_mtup_probe_failed(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
icsk->icsk_mtup.probe_size = 0;
}
static void tcp_mtup_probe_success(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
/* FIXME: breaks with very large cwnd */
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_cwnd = tp->snd_cwnd *
tcp_mss_to_mtu(sk, tp->mss_cache) /
icsk->icsk_mtup.probe_size;
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
icsk->icsk_mtup.probe_size = 0;
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
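/* The rescale above keeps the byte window roughly constant while the
 * segment size grows. E.g. (hypothetical numbers): probing up from an
 * MTU of 1500 to probe_size = 1700 with snd_cwnd = 17 gives
 * 17 * 1500 / 1700 = 15 larger segments.
 */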
/* Do a simple retransmit without using the backoff mechanisms in
* tcp_timer. This is used for path mtu discovery.
* The socket is already locked here.
*/
void tcp_simple_retransmit(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
unsigned int mss = tcp_current_mss(sk);
u32 prior_lost = tp->lost_out;
tcp_for_write_queue(skb, sk) {
if (skb == tcp_send_head(sk))
break;
if (tcp_skb_seglen(skb) > mss &&
!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
}
tcp_skb_mark_lost_uncond_verify(tp, skb);
}
}
tcp_clear_retrans_hints_partial(tp);
if (prior_lost == tp->lost_out)
return;
if (tcp_is_reno(tp))
tcp_limit_reno_sacked(tp);
tcp_verify_left_out(tp);
/* Don't muck with the congestion window here.
 * The reason is that we do not increase the amount of _data_
 * in the network; the units changed, and the effective
 * cwnd/ssthresh are really reduced now.
 */
if (icsk->icsk_ca_state != TCP_CA_Loss) {
tp->high_seq = tp->snd_nxt;
tp->snd_ssthresh = tcp_current_ssthresh(sk);
tp->prior_ssthresh = 0;
tp->undo_marker = 0;
tcp_set_ca_state(sk, TCP_CA_Loss);
}
tcp_xmit_retransmit_queue(sk);
}
EXPORT_SYMBOL(tcp_simple_retransmit);
/* This function implements the PRR algorithm, specifically the PRR-SSRB
 * (proportional rate reduction with slow start reduction bound) as described in
 * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
 * It computes the number of packets to send (sndcnt) based on packets newly
 * delivered:
 * 1) If the number of packets in flight is larger than ssthresh, PRR spreads
 * the cwnd reductions across a full RTT.
 * 2) If the number of packets in flight is lower than ssthresh (such as due
 * to excess losses and/or application stalls), do not perform any further
 * cwnd reductions, but instead slow start up to ssthresh.
 */
static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
int fast_rexmit, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int sndcnt = 0;
int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
tp->prior_cwnd - 1;
sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
} else {
sndcnt = min_t(int, delta,
max_t(int, tp->prr_delivered - tp->prr_out,
newly_acked_sacked) + 1);
}
sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}
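/* A worked PRR step (hypothetical numbers): with prior_cwnd = 10,
 * snd_ssthresh = 5, 8 packets in flight, prr_delivered = 2 and
 * prr_out = 0, the dividend is 5 * 2 + 10 - 1 = 19, so
 * sndcnt = 19 / 10 - 0 = 1 and cwnd becomes 8 + 1 = 9; by the time
 * prr_delivered reaches prior_cwnd, the total sent converges on
 * ssthresh.
 */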
/* Process an event which can update packets-in-flight non-trivially.
 * The main goal of this function is to calculate a new estimate for left_out,
 * taking into account both packets sitting in the receiver's buffer and
 * packets lost by the network.
 *
 * Besides that, it does CWND reduction when packet loss is detected
 * and changes the state of the machine.
 *
 * It does _not_ decide what to send; that is done in
 * tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
int newly_acked_sacked, bool is_dupack,
int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
(tcp_fackets_out(tp) > tp->reordering));
int fast_rexmit = 0, mib_idx;
if (WARN_ON(!tp->packets_out && tp->sacked_out))
tp->sacked_out = 0;
if (WARN_ON(!tp->sacked_out && tp->fackets_out))
tp->fackets_out = 0;
/* Now the state machine starts.
 * A. ECE, hence prohibit cwnd undoing; the reduction is required. */
if (flag & FLAG_ECE)
tp->prior_ssthresh = 0;
/* B. In all the states check for reneging SACKs. */
if (tcp_check_sack_reneging(sk, flag))
return;
/* C. Process data loss notification, provided it is valid. */
if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
before(tp->snd_una, tp->high_seq) &&
icsk->icsk_ca_state != TCP_CA_Open &&
tp->fackets_out > tp->reordering) {
tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
}
/* D. Check consistency of the current state. */
tcp_verify_left_out(tp);
/* E. Check state exit conditions. State can be terminated
* when high_seq is ACKed. */
if (icsk->icsk_ca_state == TCP_CA_Open) {
WARN_ON(tp->retrans_out != 0);
tp->retrans_stamp = 0;
} else if (!before(tp->snd_una, tp->high_seq)) {
switch (icsk->icsk_ca_state) {
case TCP_CA_Loss:
icsk->icsk_retransmits = 0;
if (tcp_try_undo_recovery(sk))
return;
break;
case TCP_CA_CWR:
/* CWR must be held until something *above* high_seq
 * is ACKed, for the CWR bit to reach the receiver. */
if (tp->snd_una != tp->high_seq) {
tcp_complete_cwr(sk);
tcp_set_ca_state(sk, TCP_CA_Open);
}
break;
case TCP_CA_Recovery:
if (tcp_is_reno(tp))
tcp_reset_reno_sack(tp);
if (tcp_try_undo_recovery(sk))
return;
tcp_complete_cwr(sk);
break;
}
}
/* F. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp) && is_dupack)
tcp_add_reno_sack(sk);
} else
do_lost = tcp_try_undo_partial(sk, pkts_acked);
break;
case TCP_CA_Loss:
if (flag & FLAG_DATA_ACKED)
icsk->icsk_retransmits = 0;
if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
if (!tcp_try_undo_loss(sk)) {
tcp_moderate_cwnd(tp);
tcp_xmit_retransmit_queue(sk);
return;
}
if (icsk->icsk_ca_state != TCP_CA_Open)
return;
/* Loss is undone; fall through to processing in Open state. */
default:
if (tcp_is_reno(tp)) {
if (flag & FLAG_SND_UNA_ADVANCED)
tcp_reset_reno_sack(tp);
if (is_dupack)
tcp_add_reno_sack(sk);
}
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
if (!tcp_time_to_recover(sk)) {
tcp_try_to_open(sk, flag);
return;
}
/* MTU probe failure: don't reduce cwnd */
if (icsk->icsk_ca_state < TCP_CA_CWR &&
icsk->icsk_mtup.probe_size &&
tp->snd_una == tp->mtu_probe.probe_seq_start) {
tcp_mtup_probe_failed(sk);
/* Restores the reduction we did in tcp_mtup_probe() */
tp->snd_cwnd++;
tcp_simple_retransmit(sk);
return;
}
/* Otherwise enter Recovery state */
if (tcp_is_reno(tp))
mib_idx = LINUX_MIB_TCPRENORECOVERY;
else
mib_idx = LINUX_MIB_TCPSACKRECOVERY;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
tp->high_seq = tp->snd_nxt;
tp->prior_ssthresh = 0;
tp->undo_marker = tp->snd_una;
tp->undo_retrans = tp->retrans_out;
if (icsk->icsk_ca_state < TCP_CA_CWR) {
if (!(flag & FLAG_ECE))
tp->prior_ssthresh = tcp_current_ssthresh(sk);
tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
TCP_ECN_queue_cwr(tp);
}
tp->bytes_acked = 0;
tp->snd_cwnd_cnt = 0;
tp->prior_cwnd = tp->snd_cwnd;
tp->prr_delivered = 0;
tp->prr_out = 0;
tcp_set_ca_state(sk, TCP_CA_Recovery);
fast_rexmit = 1;
}
if (do_lost || (tcp_is_fack(tp) && tcp_head_timedout(sk)))
tcp_update_scoreboard(sk, fast_rexmit);
tp->prr_delivered += newly_acked_sacked;
tcp_update_cwnd_in_recovery(sk, newly_acked_sacked, fast_rexmit, flag);
tcp_xmit_retransmit_queue(sk);
}
void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
{
tcp_rtt_estimator(sk, seq_rtt);
tcp_set_rto(sk);
inet_csk(sk)->icsk_backoff = 0;
}
EXPORT_SYMBOL(tcp_valid_rtt_meas);
/* Read draft-ietf-tcplw-high-performance before mucking
* with this code. (Supersedes RFC1323)
*/
static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
{
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the
* left edge of the send window.
*
* See draft-ietf-tcplw-high-performance-00, section 3.3.
* 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
*
* Changed: reset backoff as soon as we see the first valid sample.
 * If we do not, we get a strongly overestimated rto. With timestamps,
 * samples are accepted even from very old segments: f.e., when rtt=1
 * increases to 8, we retransmit 5 times, and after the 8-second delayed
 * answer arrives, rto becomes 120 seconds! If at least one of the segments
 * in the window is lost... Voila. --ANK (010210)
*/
struct tcp_sock *tp = tcp_sk(sk);
tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
}
static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
{
/* We don't have a timestamp. Can only use
* packets that are not retransmitted to determine
* rtt estimates. Also, we must not reset the
* backoff for rto until we get a non-retransmitted
* packet. This allows us to deal with a situation
* where the network delay has increased suddenly.
* I.e. Karn's algorithm. (SIGCOMM '87, p5.)
*/
if (flag & FLAG_RETRANS_DATA_ACKED)
return;
tcp_valid_rtt_meas(sk, seq_rtt);
}
static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
const s32 seq_rtt)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tcp_ack_saw_tstamp(sk, flag);
else if (seq_rtt >= 0)
tcp_ack_no_tstamp(sk, seq_rtt, flag);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
}
/* Restart timer after forward progress on connection.
 * RFC2988 recommends restarting the timer to now+rto.
*/
static void tcp_rearm_rto(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
if (!tp->packets_out) {
inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto, TCP_RTO_MAX);
}
}
/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 packets_acked;
BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una));
packets_acked = tcp_skb_pcount(skb);
if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
return 0;
packets_acked -= tcp_skb_pcount(skb);
if (packets_acked) {
BUG_ON(tcp_skb_pcount(skb) == 0);
BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq));
}
return packets_acked;
}
/* Remove acknowledged frames from the retransmission queue. If our packet
* is before the ack sequence we can discard it as it's confirmed to have
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_snd_una)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
u32 now = tcp_time_stamp;
int fully_acked = 1;
int flag = 0;
u32 pkts_acked = 0;
u32 reord = tp->packets_out;
u32 prior_sacked = tp->sacked_out;
s32 seq_rtt = -1;
s32 ca_seq_rtt = -1;
ktime_t last_ackt = net_invalid_timestamp();
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
u32 acked_pcount;
u8 sacked = scb->sacked;
/* Determine how many packets and what bytes were acked, TSO or otherwise */
if (after(scb->end_seq, tp->snd_una)) {
if (tcp_skb_pcount(skb) == 1 ||
!after(tp->snd_una, scb->seq))
break;
acked_pcount = tcp_tso_acked(sk, skb);
if (!acked_pcount)
break;
fully_acked = 0;
} else {
acked_pcount = tcp_skb_pcount(skb);
}
if (sacked & TCPCB_RETRANS) {
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
ca_seq_rtt = -1;
seq_rtt = -1;
if ((flag & FLAG_DATA_ACKED) || (acked_pcount > 1))
flag |= FLAG_NONHEAD_RETRANS_ACKED;
} else {
ca_seq_rtt = now - scb->when;
last_ackt = skb->tstamp;
if (seq_rtt < 0) {
seq_rtt = ca_seq_rtt;
}
if (!(sacked & TCPCB_SACKED_ACKED))
reord = min(pkts_acked, reord);
}
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= acked_pcount;
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
tp->packets_out -= acked_pcount;
pkts_acked += acked_pcount;
/* Initial outgoing SYNs get put onto the write_queue
* just like anything else we transmit. It is not
* true data, and if we misinform our callers that
* this ACK acks real data, we will erroneously exit
* connection startup slow start one packet too
* quickly. This is severely frowned upon behavior.
*/
if (!(scb->tcp_flags & TCPHDR_SYN)) {
flag |= FLAG_DATA_ACKED;
} else {
flag |= FLAG_SYN_ACKED;
tp->retrans_stamp = 0;
}
if (!fully_acked)
break;
tcp_unlink_write_queue(skb, sk);
sk_wmem_free_skb(sk, skb);
tp->scoreboard_skb_hint = NULL;
if (skb == tp->retransmit_skb_hint)
tp->retransmit_skb_hint = NULL;
if (skb == tp->lost_skb_hint)
tp->lost_skb_hint = NULL;
}
if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
tp->snd_up = tp->snd_una;
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
if (unlikely(icsk->icsk_mtup.probe_size &&
!after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
tcp_mtup_probe_success(sk);
}
tcp_ack_update_rtt(sk, flag, seq_rtt);
tcp_rearm_rto(sk);
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
} else {
int delta;
/* Non-retransmitted hole got filled? That's reordering */
if (reord < prior_fackets)
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
delta = tcp_is_fack(tp) ? pkts_acked :
prior_sacked - tp->sacked_out;
tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
}
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
if (ca_ops->pkts_acked) {
s32 rtt_us = -1;
/* Is the ACK triggering packet unambiguous? */
if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
/* High resolution needed and available? */
if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
!ktime_equal(last_ackt,
net_invalid_timestamp()))
rtt_us = ktime_us_delta(ktime_get_real(),
last_ackt);
else if (ca_seq_rtt >= 0)
rtt_us = jiffies_to_usecs(ca_seq_rtt);
}
ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
}
}
#if FASTRETRANS_DEBUG > 0
WARN_ON((int)tp->sacked_out < 0);
WARN_ON((int)tp->lost_out < 0);
WARN_ON((int)tp->retrans_out < 0);
if (!tp->packets_out && tcp_is_sack(tp)) {
icsk = inet_csk(sk);
if (tp->lost_out) {
printk(KERN_DEBUG "Leak l=%u %d\n",
tp->lost_out, icsk->icsk_ca_state);
tp->lost_out = 0;
}
if (tp->sacked_out) {
printk(KERN_DEBUG "Leak s=%u %d\n",
tp->sacked_out, icsk->icsk_ca_state);
tp->sacked_out = 0;
}
if (tp->retrans_out) {
printk(KERN_DEBUG "Leak r=%u %d\n",
tp->retrans_out, icsk->icsk_ca_state);
tp->retrans_out = 0;
}
}
#endif
return flag;
}
static void tcp_ack_probe(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
/* Was a usable window opened? */
if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
icsk->icsk_backoff = 0;
inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
/* The socket must be woken up by a subsequent tcp_data_snd_check().
 * This function is not for random use!
 */
} else {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
TCP_RTO_MAX);
}
}
static inline int tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
static inline int tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
const struct tcp_sock *tp = tcp_sk(sk);
return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
!((1 << inet_csk(sk)->icsk_ca_state) & (TCPF_CA_Recovery | TCPF_CA_CWR));
}
/* Check that window update is acceptable.
* The function assumes that snd_una<=ack<=snd_next.
*/
static inline int tcp_may_update_window(const struct tcp_sock *tp,
const u32 ack, const u32 ack_seq,
const u32 nwin)
{
return after(ack, tp->snd_una) ||
after(ack_seq, tp->snd_wl1) ||
(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}
/* Update our send window.
 *
 * The window update algorithm described in RFC793/RFC1122 (and used in
 * linux-2.2 and in FreeBSD; NetBSD's is even worse) is wrong.
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32 ack,
u32 ack_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
int flag = 0;
u32 nwin = ntohs(tcp_hdr(skb)->window);
if (likely(!tcp_hdr(skb)->syn))
nwin <<= tp->rx_opt.snd_wscale;
if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
flag |= FLAG_WIN_UPDATE;
tcp_update_wl(tp, ack_seq);
if (tp->snd_wnd != nwin) {
tp->snd_wnd = nwin;
/* Note: this is the only place where the
 * fast path is recovered for sending TCP.
 */
tp->pred_flags = 0;
tcp_fast_path_check(sk);
if (nwin > tp->max_window) {
tp->max_window = nwin;
tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
}
}
}
tp->snd_una = ack;
return flag;
}
/* A very conservative spurious RTO response algorithm: reduce cwnd and
* continue in congestion avoidance.
*/
static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
{
tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
tp->snd_cwnd_cnt = 0;
tp->bytes_acked = 0;
TCP_ECN_queue_cwr(tp);
tcp_moderate_cwnd(tp);
}
/* A conservative spurious RTO response algorithm: reduce cwnd using
* rate halving and continue in congestion avoidance.
*/
static void tcp_ratehalving_spur_to_response(struct sock *sk)
{
tcp_enter_cwr(sk, 0);
}
static void tcp_undo_spur_to_response(struct sock *sk, int flag)
{
if (flag & FLAG_ECE)
tcp_ratehalving_spur_to_response(sk);
else
tcp_undo_cwr(sk, true);
}
/* F-RTO spurious RTO detection algorithm (RFC4138)
*
* F-RTO affects during two new ACKs following RTO (well, almost, see inline
* comments). State (ACK number) is kept in frto_counter. When ACK advances
* window (but not to or beyond highest sequence sent before RTO):
* On First ACK, send two new segments out.
* On Second ACK, RTO was likely spurious. Do spurious response (response
* algorithm is not part of the F-RTO detection algorithm
* given in RFC4138 but can be selected separately).
 * Otherwise (basically on a duplicate ACK), the RTO was (likely) caused by
 * a loss and TCP falls back to conventional RTO recovery. F-RTO allows
 * overriding of Nagle; this is done using frto_counter states 2 and 3:
 * when a new data segment of any size is sent during F-RTO, state 2 is
 * upgraded to 3.
*
* Rationale: if the RTO was spurious, new ACKs should arrive from the
* original window even after we transmit two new data segments.
*
* SACK version:
 * on the first step, wait until the first cumulative ACK arrives, then move
 * to the second step. In the second step, the next ACK decides.
*
* F-RTO is implemented (mainly) in four functions:
 * - tcp_use_frto() is used to determine if TCP can use F-RTO
* - tcp_enter_frto() prepares TCP state on RTO if F-RTO is used, it is
* called when tcp_use_frto() showed green light
* - tcp_process_frto() handles incoming ACKs during F-RTO algorithm
* - tcp_enter_frto_loss() is called if there is not enough evidence
* to prove that the RTO is indeed spurious. It transfers the control
* from F-RTO to the conventional RTO recovery
*/
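/* Compact sketch of the frto_counter state machine, for illustration
 * only (the function below is authoritative):
 *
 *	RTO fires, tcp_enter_frto():		frto_counter = 1
 *	1st new ACK advances the window:	send two new segments,
 *						frto_counter = 2
 *	2nd new ACK advances the window:	RTO deemed spurious, run
 *						the configured response,
 *						frto_counter = 0
 *	duplicate ACK / no progress:		tcp_enter_frto_loss(),
 *						back to plain RTO recovery
 */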
static int tcp_process_frto(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_verify_left_out(tp);
/* Duplicate the behavior from Loss state (fastretrans_alert) */
if (flag & FLAG_DATA_ACKED)
inet_csk(sk)->icsk_retransmits = 0;
if ((flag & FLAG_NONHEAD_RETRANS_ACKED) ||
((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
tp->undo_marker = 0;
if (!before(tp->snd_una, tp->frto_highmark)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
return 1;
}
if (!tcp_is_sackfrto(tp)) {
/* RFC4138 shortcoming in step 2; should also have case c):
* ACK isn't duplicate nor advances window, e.g., opposite dir
* data, winupdate
*/
if (!(flag & FLAG_ANY_PROGRESS) && (flag & FLAG_NOT_DUP))
return 1;
if (!(flag & FLAG_DATA_ACKED)) {
tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 0 : 3),
flag);
return 1;
}
} else {
if (!(flag & FLAG_DATA_ACKED) && (tp->frto_counter == 1)) {
/* Prevent sending of new data. */
tp->snd_cwnd = min(tp->snd_cwnd,
tcp_packets_in_flight(tp));
return 1;
}
if ((tp->frto_counter >= 2) &&
(!(flag & FLAG_FORWARD_PROGRESS) ||
((flag & FLAG_DATA_SACKED) &&
!(flag & FLAG_ONLY_ORIG_SACKED)))) {
/* RFC4138 shortcoming (see comment above) */
if (!(flag & FLAG_FORWARD_PROGRESS) &&
(flag & FLAG_NOT_DUP))
return 1;
tcp_enter_frto_loss(sk, 3, flag);
return 1;
}
}
if (tp->frto_counter == 1) {
/* tcp_may_send_now needs to see updated state */
tp->snd_cwnd = tcp_packets_in_flight(tp) + 2;
tp->frto_counter = 2;
if (!tcp_may_send_now(sk))
tcp_enter_frto_loss(sk, 2, flag);
return 1;
} else {
switch (sysctl_tcp_frto_response) {
case 2:
tcp_undo_spur_to_response(sk, flag);
break;
case 1:
tcp_conservative_spur_to_response(tp);
break;
default:
tcp_ratehalving_spur_to_response(sk);
break;
}
tp->frto_counter = 0;
tp->undo_marker = 0;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS);
}
return 0;
}
/* This routine deals with incoming acks, but not outgoing ones. */
static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_snd_una = tp->snd_una;
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
u32 prior_in_flight;
u32 prior_fackets;
int prior_packets;
int prior_sacked = tp->sacked_out;
int pkts_acked = 0;
int newly_acked_sacked = 0;
int frto_cwnd = 0;
/* If the ack is older than previous acks
* then we can probably ignore it.
*/
if (before(ack, prior_snd_una))
goto old_ack;
/* If the ack includes data we haven't sent yet, discard
* this segment (RFC793 Section 3.9).
*/
if (after(ack, tp->snd_nxt))
goto invalid_ack;
if (after(ack, prior_snd_una))
flag |= FLAG_SND_UNA_ADVANCED;
if (sysctl_tcp_abc) {
if (icsk->icsk_ca_state < TCP_CA_CWR)
tp->bytes_acked += ack - prior_snd_una;
else if (icsk->icsk_ca_state == TCP_CA_Loss)
/* we assume just one segment left the network */
tp->bytes_acked += min(ack - prior_snd_una,
tp->mss_cache);
}
prior_fackets = tp->fackets_out;
prior_in_flight = tcp_packets_in_flight(tp);
if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
* No more checks are required.
* Note, we use the fact that SND.UNA>=SND.WL2.
*/
tcp_update_wl(tp, ack_seq);
tp->snd_una = ack;
flag |= FLAG_WIN_UPDATE;
tcp_ca_event(sk, CA_EVENT_FAST_ACK);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
} else {
if (ack_seq != TCP_SKB_CB(skb)->end_seq)
flag |= FLAG_DATA;
else
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
if (TCP_SKB_CB(skb)->sacked)
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
flag |= FLAG_ECE;
tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
}
/* We passed data and got it acked, remove any soft error
* log. Something worked...
*/
sk->sk_err_soft = 0;
icsk->icsk_probes_out = 0;
tp->rcv_tstamp = tcp_time_stamp;
prior_packets = tp->packets_out;
if (!prior_packets)
goto no_queue;
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
pkts_acked = prior_packets - tp->packets_out;
newly_acked_sacked = (prior_packets - prior_sacked) -
(tp->packets_out - tp->sacked_out);
if (tp->frto_counter)
frto_cwnd = tcp_process_frto(sk, flag);
/* Guarantee sacktag reordering detection against wrap-arounds */
if (before(tp->frto_highmark, tp->snd_una))
tp->frto_highmark = 0;
if (tcp_ack_is_dubious(sk, flag)) {
/* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd &&
tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
is_dupack, flag);
} else {
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd)
tcp_cong_avoid(sk, ack, prior_in_flight);
}
if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
dst_confirm(__sk_dst_get(sk));
return 1;
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
is_dupack, flag);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
*/
if (tcp_send_head(sk))
tcp_ack_probe(sk);
return 1;
invalid_ack:
SOCK_DEBUG(sk, "Ack %u after %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
return -1;
old_ack:
/* If data was SACKed, tag it and see if we should send more data.
* If data was DSACKed, see if we can undo a cwnd reduction.
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
newly_acked_sacked = tp->sacked_out - prior_sacked;
tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked,
is_dupack, flag);
}
SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
return 0;
}
/* Look for tcp options. Normally only called on SYN and SYNACK packets.
* But, this can also be called on packets in the established flow when
* the fast version below fails.
*/
void tcp_parse_options(const struct sk_buff *skb, struct tcp_options_received *opt_rx,
const u8 **hvpp, int estab)
{
const unsigned char *ptr;
const struct tcphdr *th = tcp_hdr(skb);
int length = (th->doff * 4) - sizeof(struct tcphdr);
ptr = (const unsigned char *)(th + 1);
opt_rx->saw_tstamp = 0;
while (length > 0) {
int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return;
case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
length--;
continue;
default:
opsize = *ptr++;
if (opsize < 2) /* "silly options" */
return;
if (opsize > length)
return; /* don't parse partial options */
switch (opcode) {
case TCPOPT_MSS:
if (opsize == TCPOLEN_MSS && th->syn && !estab) {
u16 in_mss = get_unaligned_be16(ptr);
if (in_mss) {
if (opt_rx->user_mss &&
opt_rx->user_mss < in_mss)
in_mss = opt_rx->user_mss;
opt_rx->mss_clamp = in_mss;
}
}
break;
case TCPOPT_WINDOW:
if (opsize == TCPOLEN_WINDOW && th->syn &&
!estab && sysctl_tcp_window_scaling) {
__u8 snd_wscale = *(__u8 *)ptr;
opt_rx->wscale_ok = 1;
if (snd_wscale > 14) {
if (net_ratelimit())
printk(KERN_INFO "tcp_parse_options: Illegal window "
"scaling value %d >14 received.\n",
snd_wscale);
snd_wscale = 14;
}
opt_rx->snd_wscale = snd_wscale;
}
break;
case TCPOPT_TIMESTAMP:
if ((opsize == TCPOLEN_TIMESTAMP) &&
((estab && opt_rx->tstamp_ok) ||
(!estab && sysctl_tcp_timestamps))) {
opt_rx->saw_tstamp = 1;
opt_rx->rcv_tsval = get_unaligned_be32(ptr);
opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
}
break;
case TCPOPT_SACK_PERM:
if (opsize == TCPOLEN_SACK_PERM && th->syn &&
!estab && sysctl_tcp_sack) {
opt_rx->sack_ok = 1;
tcp_sack_reset(opt_rx);
}
break;
case TCPOPT_SACK:
if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
!((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
}
break;
#ifdef CONFIG_TCP_MD5SIG
case TCPOPT_MD5SIG:
/*
* The MD5 Hash has already been
* checked (see tcp_v{4,6}_do_rcv()).
*/
break;
#endif
case TCPOPT_COOKIE:
/* This option is variable length. */
switch (opsize) {
case TCPOLEN_COOKIE_BASE:
/* not yet implemented */
break;
case TCPOLEN_COOKIE_PAIR:
/* not yet implemented */
break;
case TCPOLEN_COOKIE_MIN+0:
case TCPOLEN_COOKIE_MIN+2:
case TCPOLEN_COOKIE_MIN+4:
case TCPOLEN_COOKIE_MIN+6:
case TCPOLEN_COOKIE_MAX:
/* 16-bit multiple */
opt_rx->cookie_plus = opsize;
*hvpp = ptr;
break;
default:
/* ignore option */
break;
}
break;
}
ptr += opsize-2;
length -= opsize;
}
}
}
EXPORT_SYMBOL(tcp_parse_options);
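/* For illustration, the on-wire layout the parser above walks. An MSS
 * option in a SYN is:
 *
 *	kind = 2 (TCPOPT_MSS), len = 4 (TCPOLEN_MSS), 2 bytes of MSS
 *
 * With ptr at the kind byte, opsize is read from ptr[1] and the payload
 * starts at ptr + 2; the trailing "ptr += opsize - 2" skips the payload
 * because kind and length were already consumed. NOP (kind 1) carries
 * no length byte at all, which is why it is special-cased above.
 */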
static int tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th)
{
const __be32 *ptr = (const __be32 *)(th + 1);
if (*ptr == htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16)
| (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP)) {
tp->rx_opt.saw_tstamp = 1;
++ptr;
tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr;
tp->rx_opt.rcv_tsecr = ntohl(*ptr);
return 1;
}
return 0;
}
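/* The single-word compare above matches the option layout recommended
 * by RFC 1323 Appendix A, shown here for illustration:
 *
 *	byte 0: TCPOPT_NOP        (1)
 *	byte 1: TCPOPT_NOP        (1)
 *	byte 2: TCPOPT_TIMESTAMP  (8)
 *	byte 3: TCPOLEN_TIMESTAMP (10)
 *	bytes 4-7:  TSval
 *	bytes 8-11: TSecr
 *
 * Any other padding or additional options falls back to the generic
 * parser in tcp_parse_options().
 */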
/* Fast parse options. This hopes to only see timestamps.
* If it is wrong it falls back on tcp_parse_options().
*/
static int tcp_fast_parse_options(const struct sk_buff *skb,
const struct tcphdr *th,
struct tcp_sock *tp, const u8 **hvpp)
{
/* In the spirit of fast parsing, compare doff directly to constant
* values. Because equality is used, short doff can be ignored here.
*/
if (th->doff == (sizeof(*th) / 4)) {
tp->rx_opt.saw_tstamp = 0;
return 0;
} else if (tp->rx_opt.tstamp_ok &&
th->doff == ((sizeof(*th) + TCPOLEN_TSTAMP_ALIGNED) / 4)) {
if (tcp_parse_aligned_timestamp(tp, th))
return 1;
}
tcp_parse_options(skb, &tp->rx_opt, hvpp, 1);
return 1;
}
#ifdef CONFIG_TCP_MD5SIG
/*
* Parse MD5 Signature option
*/
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
{
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
/* If the TCP option space is too short, we can bail out early */
if (length < TCPOLEN_MD5SIG)
return NULL;
while (length > 0) {
int opcode = *ptr++;
int opsize;
switch (opcode) {
case TCPOPT_EOL:
return NULL;
case TCPOPT_NOP:
length--;
continue;
default:
opsize = *ptr++;
if (opsize < 2 || opsize > length)
return NULL;
if (opcode == TCPOPT_MD5SIG)
return opsize == TCPOLEN_MD5SIG ? ptr : NULL;
}
ptr += opsize - 2;
length -= opsize;
}
return NULL;
}
EXPORT_SYMBOL(tcp_parse_md5sig_option);
#endif
static inline void tcp_store_ts_recent(struct tcp_sock *tp)
{
tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
tp->rx_opt.ts_recent_stamp = get_seconds();
}
static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
/* PAWS bug workaround wrt. ACK frames, the PAWS discard
* extra check below makes sure this can only happen
* for pure ACK frames. -DaveM
*
 * Not only that; it also occurs for expired timestamps.
*/
if (tcp_paws_check(&tp->rx_opt, 0))
tcp_store_ts_recent(tp);
}
}
/* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM
*
* It is not fatal. If this ACK does _not_ change critical state (seqs, window)
* it can pass through stack. So, the following predicate verifies that
* this segment is not used for anything but congestion avoidance or
* fast retransmit. Moreover, we even are able to eliminate most of such
* second order effects, if we apply some small "replay" window (~RTO)
* to timestamp space.
*
 * All these measures still do not guarantee that we reject wrapped ACKs
 * on networks with high bandwidth, where sequence space is recycled quickly,
 * but they guarantee that such events will be very rare and will not affect
 * the connection seriously. This doesn't look nice, but alas, PAWS is a
 * really buggy extension.
*
 * [ Later note. Even worse! It is buggy for segments _with_ data. The RFC
 * states that events where a retransmit arrives after the original data are
 * rare. It is a blatant lie: VJ forgot about fast retransmit! 8)8) It is
 * the biggest problem on large power networks even with minor reordering.
 * OK, let's give it a small replay window. If the peer clock is even 1Hz,
 * it is safe up to a bandwidth of 18Gigabit/sec. 8) ]
*/
static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct tcphdr *th = tcp_hdr(skb);
u32 seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
return (/* 1. Pure ACK with correct sequence number. */
(th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) &&
/* 2. ... and duplicate ACK. */
ack == tp->snd_una &&
/* 3. ... and does not update window. */
!tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) &&
/* 4. ... and sits in replay window. */
(s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= (inet_csk(sk)->icsk_rto * 1024) / HZ);
}
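/* The replay window in check 4 above is roughly one RTO expressed in
 * timestamp ticks, for illustration: icsk_rto is in jiffies, so
 * (icsk_rto * 1024) / HZ approximates the RTO in milliseconds (1024
 * standing in for 1000), i.e. one RTO on a peer whose timestamp clock
 * ticks at about 1kHz; slower peer clocks make the replay window wider
 * in real time.
 */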
static inline int tcp_paws_discard(const struct sock *sk,
const struct sk_buff *skb)
{
const struct tcp_sock *tp = tcp_sk(sk);
return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) &&
!tcp_disordered_ack(sk, skb);
}
/* Check segment sequence number for validity.
*
 * Segment controls are considered valid if the segment
 * fits into the window after truncation to the window. Acceptability
 * of data (and SYN, FIN, of course) is checked separately.
 * See tcp_data_queue(), for example.
 *
 * Also, controls (RST being the main one) are accepted using RCV.WUP instead
 * of RCV.NXT. The peer may still not have advanced its SND.UNA when we
 * delayed our ACK, so that his SND.UNA <= our RCV.WUP.
* (borrowed from freebsd)
*/
static inline int tcp_sequence(const struct tcp_sock *tp, u32 seq, u32 end_seq)
{
return !before(end_seq, tp->rcv_wup) &&
!after(seq, tp->rcv_nxt + tcp_receive_window(tp));
}
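/* Worked example, for illustration only: with rcv_wup = 1000,
 * rcv_nxt = 1500 and tcp_receive_window() = 8192, a segment
 * [900,1100) is acceptable (its end reaches past rcv_wup and its
 * start is inside the window), while [500,900) fails the first test
 * and [12000,12500) fails the second.
 */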
/* When we get a reset we do this. */
static void tcp_reset(struct sock *sk)
{
/* We want the right error as BSD sees it (and indeed as we do). */
switch (sk->sk_state) {
case TCP_SYN_SENT:
sk->sk_err = ECONNREFUSED;
break;
case TCP_CLOSE_WAIT:
sk->sk_err = EPIPE;
break;
case TCP_CLOSE:
return;
default:
sk->sk_err = ECONNRESET;
}
/* This barrier is coupled with smp_rmb() in tcp_poll() */
smp_wmb();
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_error_report(sk);
tcp_done(sk);
}
/*
 * Process the FIN bit. This now behaves as it is supposed to,
 * and the FIN takes effect when it is validly part of sequence
 * space; not before, while we still have holes.
*
* If we are ESTABLISHED, a received fin moves us to CLOSE-WAIT
* (and thence onto LAST-ACK and finally, CLOSE, we never enter
* TIME-WAIT)
*
* If we are in FINWAIT-1, a received FIN indicates simultaneous
* close and we go into CLOSING (and later onto TIME-WAIT)
*
* If we are in FINWAIT-2, a received FIN moves us to TIME-WAIT.
*/
static void tcp_fin(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
inet_csk_schedule_ack(sk);
sk->sk_shutdown |= RCV_SHUTDOWN;
sock_set_flag(sk, SOCK_DONE);
switch (sk->sk_state) {
case TCP_SYN_RECV:
case TCP_ESTABLISHED:
/* Move to CLOSE_WAIT */
tcp_set_state(sk, TCP_CLOSE_WAIT);
inet_csk(sk)->icsk_ack.pingpong = 1;
break;
case TCP_CLOSE_WAIT:
case TCP_CLOSING:
/* Received a retransmission of the FIN, do
* nothing.
*/
break;
case TCP_LAST_ACK:
/* RFC793: Remain in the LAST-ACK state. */
break;
case TCP_FIN_WAIT1:
/* This case occurs when a simultaneous close
* happens, we must ack the received FIN and
* enter the CLOSING state.
*/
tcp_send_ack(sk);
tcp_set_state(sk, TCP_CLOSING);
break;
case TCP_FIN_WAIT2:
/* Received a FIN -- send ACK and enter TIME_WAIT. */
tcp_send_ack(sk);
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
break;
default:
/* Only TCP_LISTEN and TCP_CLOSE are left, in these
* cases we should never reach this piece of code.
*/
printk(KERN_ERR "%s: Impossible, sk->sk_state=%d\n",
__func__, sk->sk_state);
break;
}
/* It _is_ possible that we have something out-of-order _after_ the FIN.
 * Probably we should reset in this case. For now, drop them.
*/
__skb_queue_purge(&tp->out_of_order_queue);
if (tcp_is_sack(tp))
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
/* Do not send POLL_HUP for half duplex close. */
if (sk->sk_shutdown == SHUTDOWN_MASK ||
sk->sk_state == TCP_CLOSE)
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
else
sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
}
static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq,
u32 end_seq)
{
if (!after(seq, sp->end_seq) && !after(sp->start_seq, end_seq)) {
if (before(seq, sp->start_seq))
sp->start_seq = seq;
if (after(end_seq, sp->end_seq))
sp->end_seq = end_seq;
return 1;
}
return 0;
}
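/* Worked example, for illustration only: a block {start_seq = 1000,
 * end_seq = 2000} extended with seq = 1500, end_seq = 2500 becomes
 * {1000, 2500} and the function returns 1; with seq = 3000 the ranges
 * do not touch, nothing changes and it returns 0. Exactly adjacent
 * ranges (seq == sp->end_seq) do merge, because the tests use !after().
 */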
static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
int mib_idx;
if (before(seq, tp->rcv_nxt))
mib_idx = LINUX_MIB_TCPDSACKOLDSENT;
else
mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
tp->rx_opt.dsack = 1;
tp->duplicate_sack[0].start_seq = seq;
tp->duplicate_sack[0].end_seq = end_seq;
}
}
static void tcp_dsack_extend(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
if (!tp->rx_opt.dsack)
tcp_dsack_set(sk, seq, end_seq);
else
tcp_sack_extend(tp->duplicate_sack, seq, end_seq);
}
static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_enter_quickack_mode(sk);
if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
end_seq = tp->rcv_nxt;
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, end_seq);
}
}
tcp_send_ack(sk);
}
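/* D-SACK range selection above, for illustration: with rcv_nxt = 2000,
 * a discarded duplicate covering [1000,3000) is reported as the D-SACK
 * block [1000,2000), i.e. only the part we had genuinely received
 * before, since advertising sequence space beyond rcv_nxt would be
 * bogus.
 */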
/* These routines update the SACK block as out-of-order packets arrive or
* in-order packets close up the sequence space.
*/
static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
{
int this_sack;
struct tcp_sack_block *sp = &tp->selective_acks[0];
struct tcp_sack_block *swalk = sp + 1;
/* See if the recent change to the first SACK eats into
* or hits the sequence space of other SACK blocks, if so coalesce.
*/
for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) {
if (tcp_sack_extend(sp, swalk->start_seq, swalk->end_seq)) {
int i;
/* Zap SWALK, by moving every further SACK up by one slot.
* Decrease num_sacks.
*/
tp->rx_opt.num_sacks--;
for (i = this_sack; i < tp->rx_opt.num_sacks; i++)
sp[i] = sp[i + 1];
continue;
}
this_sack++, swalk++;
}
}
static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_sack_block *sp = &tp->selective_acks[0];
int cur_sacks = tp->rx_opt.num_sacks;
int this_sack;
if (!cur_sacks)
goto new_sack;
for (this_sack = 0; this_sack < cur_sacks; this_sack++, sp++) {
if (tcp_sack_extend(sp, seq, end_seq)) {
/* Rotate this_sack to the first one. */
for (; this_sack > 0; this_sack--, sp--)
swap(*sp, *(sp - 1));
if (cur_sacks > 1)
tcp_sack_maybe_coalesce(tp);
return;
}
}
/* Could not find an adjacent existing SACK, build a new one,
* put it at the front, and shift everyone else down. We
* always know there is at least one SACK present already here.
*
* If the sack array is full, forget about the last one.
*/
if (this_sack >= TCP_NUM_SACKS) {
this_sack--;
tp->rx_opt.num_sacks--;
sp--;
}
for (; this_sack > 0; this_sack--, sp--)
*sp = *(sp - 1);
new_sack:
/* Build the new head SACK, and we're done. */
sp->start_seq = seq;
sp->end_seq = end_seq;
tp->rx_opt.num_sacks++;
}
/* RCV.NXT advances, some SACKs should be eaten. */
static void tcp_sack_remove(struct tcp_sock *tp)
{
struct tcp_sack_block *sp = &tp->selective_acks[0];
int num_sacks = tp->rx_opt.num_sacks;
int this_sack;
/* Empty ofo queue, hence, all the SACKs are eaten. Clear. */
if (skb_queue_empty(&tp->out_of_order_queue)) {
tp->rx_opt.num_sacks = 0;
return;
}
for (this_sack = 0; this_sack < num_sacks;) {
/* Check if the start of the sack is covered by RCV.NXT. */
if (!before(tp->rcv_nxt, sp->start_seq)) {
int i;
/* RCV.NXT must cover all the block! */
WARN_ON(before(tp->rcv_nxt, sp->end_seq));
/* Zap this SACK, by moving forward any other SACKS. */
for (i = this_sack + 1; i < num_sacks; i++)
	tp->selective_acks[i - 1] = tp->selective_acks[i];
num_sacks--;
continue;
}
this_sack++;
sp++;
}
tp->rx_opt.num_sacks = num_sacks;
}
/* This one checks to see if we can put data from the
* out_of_order queue into the receive_queue.
*/
static void tcp_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
__u32 dsack_high = tp->rcv_nxt;
struct sk_buff *skb;
while ((skb = skb_peek(&tp->out_of_order_queue)) != NULL) {
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
if (before(TCP_SKB_CB(skb)->seq, dsack_high)) {
__u32 dsack = dsack_high;
if (before(TCP_SKB_CB(skb)->end_seq, dsack_high))
dsack_high = TCP_SKB_CB(skb)->end_seq;
tcp_dsack_extend(sk, TCP_SKB_CB(skb)->seq, dsack);
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
SOCK_DEBUG(sk, "ofo packet was already received\n");
__skb_unlink(skb, &tp->out_of_order_queue);
__kfree_skb(skb);
continue;
}
SOCK_DEBUG(sk, "ofo requeuing : rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq);
__skb_unlink(skb, &tp->out_of_order_queue);
__skb_queue_tail(&sk->sk_receive_queue, skb);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (tcp_hdr(skb)->fin)
tcp_fin(sk);
}
}
static int tcp_prune_ofo_queue(struct sock *sk);
static int tcp_prune_queue(struct sock *sk);
static inline int tcp_try_rmem_schedule(struct sock *sk, unsigned int size)
{
if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
!sk_rmem_schedule(sk, size)) {
if (tcp_prune_queue(sk) < 0)
return -1;
if (!sk_rmem_schedule(sk, size)) {
if (!tcp_prune_ofo_queue(sk))
return -1;
if (!sk_rmem_schedule(sk, size))
return -1;
}
}
return 0;
}
static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = tcp_hdr(skb);
struct tcp_sock *tp = tcp_sk(sk);
int eaten = -1;
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
goto drop;
skb_dst_drop(skb);
__skb_pull(skb, th->doff * 4);
TCP_ECN_accept_cwr(tp, skb);
tp->rx_opt.dsack = 0;
/* Queue data for delivery to the user.
* Packets in sequence go to the receive queue.
* Out of sequence packets to the out_of_order_queue.
*/
if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
if (tcp_receive_window(tp) == 0)
goto out_of_window;
/* Ok. In sequence. In window. */
if (tp->ucopy.task == current &&
tp->copied_seq == tp->rcv_nxt && tp->ucopy.len &&
sock_owned_by_user(sk) && !tp->urg_data) {
int chunk = min_t(unsigned int, skb->len,
tp->ucopy.len);
__set_current_state(TASK_RUNNING);
local_bh_enable();
if (!skb_copy_datagram_iovec(skb, 0, tp->ucopy.iov, chunk)) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
eaten = (chunk == skb->len);
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
}
if (eaten <= 0) {
queue_and_out:
if (eaten < 0 &&
tcp_try_rmem_schedule(sk, skb->truesize))
goto drop;
skb_set_owner_r(skb, sk);
__skb_queue_tail(&sk->sk_receive_queue, skb);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
if (skb->len)
tcp_event_data_recv(sk, skb);
if (th->fin)
tcp_fin(sk);
if (!skb_queue_empty(&tp->out_of_order_queue)) {
tcp_ofo_queue(sk);
/* RFC2581 4.2: SHOULD send an immediate ACK when
 * the gap in the queue is filled.
*/
if (skb_queue_empty(&tp->out_of_order_queue))
inet_csk(sk)->icsk_ack.pingpong = 0;
}
if (tp->rx_opt.num_sacks)
tcp_sack_remove(tp);
tcp_fast_path_check(sk);
if (eaten > 0)
__kfree_skb(skb);
else if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
return;
}
if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
/* A retransmit, 2nd most common case. Force an immediate ack. */
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
out_of_window:
tcp_enter_quickack_mode(sk);
inet_csk_schedule_ack(sk);
drop:
__kfree_skb(skb);
return;
}
/* Out of window. E.g., a zero window probe. */
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
goto out_of_window;
tcp_enter_quickack_mode(sk);
if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
/* Partial packet, seq < rcv_next < end_seq */
SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq);
tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt);
/* If the window is closed, drop the tail of the packet, but
 * only after remembering a D-SACK for its head, as done on the
 * previous line.
*/
if (!tcp_receive_window(tp))
goto out_of_window;
goto queue_and_out;
}
TCP_ECN_check_ce(tp, skb);
if (tcp_try_rmem_schedule(sk, skb->truesize))
goto drop;
/* Disable header prediction. */
tp->pred_flags = 0;
inet_csk_schedule_ack(sk);
SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
skb_set_owner_r(skb, sk);
if (!skb_peek(&tp->out_of_order_queue)) {
/* Initial out of order segment, build 1 SACK. */
if (tcp_is_sack(tp)) {
tp->rx_opt.num_sacks = 1;
tp->selective_acks[0].start_seq = TCP_SKB_CB(skb)->seq;
tp->selective_acks[0].end_seq =
TCP_SKB_CB(skb)->end_seq;
}
__skb_queue_head(&tp->out_of_order_queue, skb);
} else {
struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);
u32 seq = TCP_SKB_CB(skb)->seq;
u32 end_seq = TCP_SKB_CB(skb)->end_seq;
if (seq == TCP_SKB_CB(skb1)->end_seq) {
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
if (!tp->rx_opt.num_sacks ||
tp->selective_acks[0].end_seq != seq)
goto add_sack;
/* Common case: data arrives in order after the hole. */
tp->selective_acks[0].end_seq = end_seq;
return;
}
/* Find place to insert this segment. */
while (1) {
if (!after(TCP_SKB_CB(skb1)->seq, seq))
break;
if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
skb1 = NULL;
break;
}
skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
}
/* Does skb overlap the previous one? */
if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
/* All the bits are present. Drop. */
__kfree_skb(skb);
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
}
if (after(seq, TCP_SKB_CB(skb1)->seq)) {
/* Partial overlap. */
tcp_dsack_set(sk, seq,
TCP_SKB_CB(skb1)->end_seq);
} else {
if (skb_queue_is_first(&tp->out_of_order_queue,
skb1))
skb1 = NULL;
else
skb1 = skb_queue_prev(
&tp->out_of_order_queue,
skb1);
}
}
if (!skb1)
__skb_queue_head(&tp->out_of_order_queue, skb);
else
__skb_queue_after(&tp->out_of_order_queue, skb1, skb);
/* And clean segments covered by new one as whole. */
while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) {
skb1 = skb_queue_next(&tp->out_of_order_queue, skb);
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
end_seq);
break;
}
__skb_unlink(skb1, &tp->out_of_order_queue);
tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
TCP_SKB_CB(skb1)->end_seq);
__kfree_skb(skb1);
}
add_sack:
if (tcp_is_sack(tp))
tcp_sack_new_ofo_skb(sk, seq, end_seq);
}
}
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
struct sk_buff_head *list)
{
struct sk_buff *next = NULL;
if (!skb_queue_is_last(list, skb))
next = skb_queue_next(list, skb);
__skb_unlink(skb, list);
__kfree_skb(skb);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
return next;
}
/* Collapse contiguous sequence of skbs head..tail with
* sequence numbers start..end.
*
* If tail is NULL, this means until the end of the list.
*
* Segments with FIN/SYN are not collapsed (only because this
* simplifies code)
*/
static void
tcp_collapse(struct sock *sk, struct sk_buff_head *list,
struct sk_buff *head, struct sk_buff *tail,
u32 start, u32 end)
{
struct sk_buff *skb, *n;
bool end_of_skbs;
/* First, check that the queue is collapsible and find
 * the point where collapsing can be useful. */
skb = head;
restart:
end_of_skbs = true;
skb_queue_walk_from_safe(list, skb, n) {
if (skb == tail)
break;
/* No new bits? It is possible on ofo queue. */
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list);
if (!skb)
break;
goto restart;
}
/* The first skb to collapse is:
* - not SYN/FIN and
* - bloated or contains data before "start" or
 * overlaps the next one.
*/
if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
(tcp_win_from_space(skb->truesize) > skb->len ||
before(TCP_SKB_CB(skb)->seq, start))) {
end_of_skbs = false;
break;
}
if (!skb_queue_is_last(list, skb)) {
struct sk_buff *next = skb_queue_next(list, skb);
if (next != tail &&
TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) {
end_of_skbs = false;
break;
}
}
/* Decided to skip this, advance start seq. */
start = TCP_SKB_CB(skb)->end_seq;
}
if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
return;
while (before(start, end)) {
struct sk_buff *nskb;
unsigned int header = skb_headroom(skb);
int copy = SKB_MAX_ORDER(header, 0);
/* Too big header? This can happen with IPv6. */
if (copy < 0)
return;
if (end - start < copy)
copy = end - start;
nskb = alloc_skb(copy + header, GFP_ATOMIC);
if (!nskb)
return;
skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
skb_set_network_header(nskb, (skb_network_header(skb) -
skb->head));
skb_set_transport_header(nskb, (skb_transport_header(skb) -
skb->head));
skb_reserve(nskb, header);
memcpy(nskb->head, skb->head, header);
memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
__skb_queue_before(list, skb, nskb);
skb_set_owner_r(nskb, sk);
/* Copy data, releasing collapsed skbs. */
while (copy > 0) {
int offset = start - TCP_SKB_CB(skb)->seq;
int size = TCP_SKB_CB(skb)->end_seq - start;
BUG_ON(offset < 0);
if (size > 0) {
size = min(copy, size);
if (skb_copy_bits(skb, offset, skb_put(nskb, size), size))
BUG();
TCP_SKB_CB(nskb)->end_seq += size;
copy -= size;
start += size;
}
if (!before(start, TCP_SKB_CB(skb)->end_seq)) {
skb = tcp_collapse_one(sk, skb, list);
if (!skb ||
skb == tail ||
tcp_hdr(skb)->syn ||
tcp_hdr(skb)->fin)
return;
}
}
}
}
/* Collapse ofo queue. Algorithm: select contiguous sequence of skbs
 * and tcp_collapse() them until the whole queue is collapsed.
*/
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = skb_peek(&tp->out_of_order_queue);
struct sk_buff *head;
u32 start, end;
if (skb == NULL)
return;
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
head = skb;
for (;;) {
struct sk_buff *next = NULL;
if (!skb_queue_is_last(&tp->out_of_order_queue, skb))
next = skb_queue_next(&tp->out_of_order_queue, skb);
skb = next;
/* The segment is terminated when we see a gap or when
 * we are at the end of the whole queue. */
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
tcp_collapse(sk, &tp->out_of_order_queue,
head, skb, start, end);
head = skb;
if (!skb)
break;
/* Start new segment */
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
} else {
if (before(TCP_SKB_CB(skb)->seq, start))
start = TCP_SKB_CB(skb)->seq;
if (after(TCP_SKB_CB(skb)->end_seq, end))
end = TCP_SKB_CB(skb)->end_seq;
}
}
}
/*
* Purge the out-of-order queue.
* Return true if queue was pruned.
*/
static int tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int res = 0;
if (!skb_queue_empty(&tp->out_of_order_queue)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
__skb_queue_purge(&tp->out_of_order_queue);
/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
* is in a sad state like this, we care only about integrity
* of the connection not performance.
*/
if (tp->rx_opt.sack_ok)
tcp_sack_reset(&tp->rx_opt);
sk_mem_reclaim(sk);
res = 1;
}
return res;
}
/* Reduce allocated memory if we can, trying to get
* the socket within its memory limits again.
*
* Return less than zero if we should start dropping frames
* until the socket owning process reads some of the data
* to stabilize the situation.
*/
static int tcp_prune_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
tcp_clamp_window(sk);
else if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
tcp_collapse(sk, &sk->sk_receive_queue,
skb_peek(&sk->sk_receive_queue),
NULL,
tp->copied_seq, tp->rcv_nxt);
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* Collapsing did not help, destructive actions follow.
* This must not ever occur. */
tcp_prune_ofo_queue(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
return 0;
/* If we are really being abused, tell the caller to silently
* drop receive data on the floor. It will get retransmitted
* and hopefully then we'll have sufficient space.
*/
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
/* Massive buffer overcommit. */
tp->pred_flags = 0;
return -1;
}
/* RFC2861, slow part. Adjust cwnd after it was not full during one RTO.
 * As additional protection, we do not touch cwnd in retransmission phases,
 * or if the application hit its sndbuf limit recently.
*/
void tcp_cwnd_application_limited(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
/* Limited by application or receiver window. */
u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
u32 win_used = max(tp->snd_cwnd_used, init_win);
if (win_used < tp->snd_cwnd) {
tp->snd_ssthresh = tcp_current_ssthresh(sk);
tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
}
tp->snd_cwnd_used = 0;
}
tp->snd_cwnd_stamp = tcp_time_stamp;
}
static int tcp_should_expand_sndbuf(const struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* If the user specified a specific send buffer setting, do
* not modify it.
*/
if (sk->sk_userlocks & SOCK_SNDBUF_LOCK)
return 0;
/* If we are under global TCP memory pressure, do not expand. */
if (tcp_memory_pressure)
return 0;
/* If we are under soft global TCP memory pressure, do not expand. */
if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
return 0;
/* If we filled the congestion window, do not expand. */
if (tp->packets_out >= tp->snd_cwnd)
return 0;
return 1;
}
/* When an incoming ACK allowed us to free some skb from the write_queue,
 * we remember this event in the flag SOCK_QUEUE_SHRUNK and wake up the
 * socket on exit from the TCP input handler.
*
* PROBLEM: sndbuf expansion does not work well with largesend.
*/
static void tcp_new_space(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_should_expand_sndbuf(sk)) {
int sndmem = SKB_TRUESIZE(max_t(u32,
tp->rx_opt.mss_clamp,
tp->mss_cache) +
MAX_TCP_HEADER);
int demanded = max_t(unsigned int, tp->snd_cwnd,
tp->reordering + 1);
sndmem *= 2 * demanded;
if (sndmem > sk->sk_sndbuf)
sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
tp->snd_cwnd_stamp = tcp_time_stamp;
}
sk->sk_write_space(sk);
}
static void tcp_check_space(struct sock *sk)
{
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags))
tcp_new_space(sk);
}
}
static inline void tcp_data_snd_check(struct sock *sk)
{
tcp_push_pending_frames(sk);
tcp_check_space(sk);
}
/*
* Check if sending an ack is needed.
*/
static void __tcp_ack_snd_check(struct sock *sk, int ofo_possible)
{
struct tcp_sock *tp = tcp_sk(sk);
/* More than one full frame received... */
if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss &&
/* ... and right edge of window advances far enough.
* (tcp_recvmsg() will send ACK otherwise). Or...
*/
__tcp_select_window(sk) >= tp->rcv_wnd) ||
/* We ACK each frame or... */
tcp_in_quickack_mode(sk) ||
/* We have out of order data. */
(ofo_possible && skb_peek(&tp->out_of_order_queue))) {
/* Then ack it now */
tcp_send_ack(sk);
} else {
/* Else, send delayed ack. */
tcp_send_delayed_ack(sk);
}
}
static inline void tcp_ack_snd_check(struct sock *sk)
{
if (!inet_csk_ack_scheduled(sk)) {
/* We sent a data segment already. */
return;
}
__tcp_ack_snd_check(sk, 1);
}
/*
* This routine is only called when we have urgent data
 * signaled. It's the 'slow' part of tcp_urg. It could be
 * moved inline now as tcp_urg is only called from one
 * place. We handle URGent data wrong; we have to, as
 * BSD still doesn't use the correction from RFC961.
* For 1003.1g we should support a new option TCP_STDURG to permit
* either form (or just set the sysctl tcp_stdurg).
*/
static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
u32 ptr = ntohs(th->urg_ptr);
if (ptr && !sysctl_tcp_stdurg)
ptr--;
ptr += ntohl(th->seq);
/* Ignore urgent data that we've already seen and read. */
if (after(tp->copied_seq, ptr))
return;
/* Do not replay urg ptr.
*
* NOTE: interesting situation not covered by specs.
 * A misbehaving sender may send an urg ptr pointing into a segment
 * which we already have in the ofo queue. We are not able to fetch
 * such data and will stay in TCP_URG_NOTYET until it is eaten
 * by recvmsg(). It seems we are not obliged to handle such wicked
 * situations, but it is worth thinking about the possibility of
 * DoSes using some hypothetical application-level deadlock.
*/
if (before(ptr, tp->rcv_nxt))
return;
/* Do we already have a newer (or duplicate) urgent pointer? */
if (tp->urg_data && !after(ptr, tp->urg_seq))
return;
/* Tell the world about our new urgent pointer. */
sk_send_sigurg(sk);
/* We may be adding urgent data when the last byte read was
* urgent. To do this requires some care. We cannot just ignore
* tp->copied_seq since we would read the last urgent byte again
* as data, nor can we alter copied_seq until this data arrives
* or we break the semantics of SIOCATMARK (and thus sockatmark())
*
 * NOTE. Double Dutch. Rendering into plain English: the author of the
 * comment above did something like send("A", MSG_OOB); send("B", MSG_OOB);
 * and expected that both A and B disappear from the stream. This is _wrong_.
 * Though this happens in BSD with high probability, it is occasional.
 * Any application relying on this is buggy. Note also that the fix "works"
 * only in this artificial test. Insert some normal data between A and B
 * and we will diverge from BSD again. Verdict: it is better to remove it,
 * to trap buggy users.
*/
if (tp->urg_seq == tp->copied_seq && tp->urg_data &&
!sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) {
struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
tp->copied_seq++;
if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) {
__skb_unlink(skb, &sk->sk_receive_queue);
__kfree_skb(skb);
}
}
tp->urg_data = TCP_URG_NOTYET;
tp->urg_seq = ptr;
/* Disable header prediction. */
tp->pred_flags = 0;
}
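/* Urgent pointer arithmetic above, for illustration: with
 * th->seq = 5000 and th->urg_ptr = 10, the default (BSD-compatible)
 * path takes ptr = 5000 + 10 - 1 = 5009, one below the raw pointer;
 * with sysctl_tcp_stdurg set the raw value is used and ptr = 5010.
 */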
/* This is the 'fast' part of urgent handling. */
static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *th)
{
struct tcp_sock *tp = tcp_sk(sk);
/* Check if we get a new urgent pointer - normally not. */
if (th->urg)
tcp_check_urg(sk, th);
/* Do we wait for any urgent data? - normally not... */
if (tp->urg_data == TCP_URG_NOTYET) {
u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) -
th->syn;
/* Is the urgent pointer pointing into this packet? */
if (ptr < skb->len) {
u8 tmp;
if (skb_copy_bits(skb, ptr, &tmp, 1))
BUG();
tp->urg_data = TCP_URG_VALID | tmp;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_data_ready(sk, 0);
}
}
}
static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
int err;
local_bh_enable();
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, hlen, tp->ucopy.iov, chunk);
else
err = skb_copy_and_csum_datagram_iovec(skb, hlen,
tp->ucopy.iov);
if (!err) {
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
tcp_rcv_space_adjust(sk);
}
local_bh_disable();
return err;
}
static __sum16 __tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
__sum16 result;
if (sock_owned_by_user(sk)) {
local_bh_enable();
result = __tcp_checksum_complete(skb);
local_bh_disable();
} else {
result = __tcp_checksum_complete(skb);
}
return result;
}
static inline int tcp_checksum_complete_user(struct sock *sk,
struct sk_buff *skb)
{
return !skb_csum_unnecessary(skb) &&
__tcp_checksum_complete_user(sk, skb);
}
#ifdef CONFIG_NET_DMA
static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
int hlen)
{
struct tcp_sock *tp = tcp_sk(sk);
int chunk = skb->len - hlen;
int dma_cookie;
int copied_early = 0;
if (tp->ucopy.wakeup)
return 0;
if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
dma_cookie = dma_skb_copy_datagram_iovec(tp->ucopy.dma_chan,
skb, hlen,
tp->ucopy.iov, chunk,
tp->ucopy.pinned_list);
if (dma_cookie < 0)
goto out;
tp->ucopy.dma_cookie = dma_cookie;
copied_early = 1;
tp->ucopy.len -= chunk;
tp->copied_seq += chunk;
tcp_rcv_space_adjust(sk);
if ((tp->ucopy.len == 0) ||
(tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
(atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
tp->ucopy.wakeup = 1;
sk->sk_data_ready(sk, 0);
}
} else if (chunk > 0) {
tp->ucopy.wakeup = 1;
sk->sk_data_ready(sk, 0);
}
out:
return copied_early;
}
#endif /* CONFIG_NET_DMA */
/* Does PAWS and seqno based validation of an incoming segment; flags will
 * play a significant role here.
*/
static int tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, int syn_inerr)
{
const u8 *hash_location;
struct tcp_sock *tp = tcp_sk(sk);
/* RFC1323: H1. Apply PAWS check first. */
if (tcp_fast_parse_options(skb, th, tp, &hash_location) &&
tp->rx_opt.saw_tstamp &&
tcp_paws_discard(sk, skb)) {
if (!th->rst) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
tcp_send_dupack(sk, skb);
goto discard;
}
/* Reset is accepted even if it did not pass PAWS. */
}
/* Step 1: check sequence number */
if (!tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)) {
/* RFC793, page 37: "In all states except SYN-SENT, all reset
* (RST) segments are validated by checking their SEQ-fields."
* And page 69: "If an incoming segment is not acceptable,
* an acknowledgment should be sent in reply (unless the RST
* bit is set, if so drop the segment and return)".
*/
if (!th->rst)
tcp_send_dupack(sk, skb);
goto discard;
}
/* Step 2: check RST bit */
if (th->rst) {
tcp_reset(sk);
goto discard;
}
/* ts_recent update must be made after we are sure that the packet
* is in window.
*/
tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq);
/* step 3: check security and precedence [ignored] */
/* step 4: Check for a SYN in window. */
if (th->syn && !before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
if (syn_inerr)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONSYN);
tcp_reset(sk);
return -1;
}
return 1;
discard:
__kfree_skb(skb);
return 0;
}
/*
* TCP receive function for the ESTABLISHED state.
*
* It is split into a fast path and a slow path. The fast path is
* disabled when:
* - A zero window was announced from us - zero window probing
* is only handled properly in the slow path.
* - Out of order segments arrived.
* - Urgent data is expected.
* - There is no buffer space left
* - Unexpected TCP flags/window values/header lengths are received
* (detected by checking the TCP header against pred_flags)
* - Data is sent in both directions. Fast path only supports pure senders
* or pure receivers (this means either the sequence number or the ack
* value must stay constant)
* - Unexpected TCP option.
*
* When these conditions are not satisfied it drops into a standard
* receive procedure patterned after RFC793 to handle all cases.
* The first three cases are guaranteed by proper pred_flags setting,
* the rest is checked inline. Fast processing is turned on in
* tcp_data_queue when everything is OK.
*/
int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
int res;
/*
* Header prediction.
* The code loosely follows the one in the famous
* "30 instruction TCP receive" Van Jacobson mail.
*
 * Van's trick is to deposit buffers into the socket queue
 * on a device interrupt, then to call the tcp_recv function
 * in the receive process context to checksum and copy
 * the buffer to user space. Smart...
*
* Our current scheme is not silly either but we take the
* extra cost of the net_bh soft interrupt processing...
* We do checksum and copy also but from device to kernel.
*/
tp->rx_opt.saw_tstamp = 0;
/* pred_flags is 0xS?10 << 16 + snd_wnd
 * if header prediction is to be made.
 * 'S' will always be tp->tcp_header_len >> 2.
 * '?' will be 0 for the fast path; otherwise pred_flags is set to 0
 * to turn prediction off (when there are holes in the receive
 * space, for instance).
 * The PSH flag is ignored.
*/
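/* For illustration: with timestamps negotiated (tcp_header_len = 32,
 * so S = 8) and a window field of 0x2000, pred_flags is
 * 0x8010 << 16 | 0x2000 = 0x80102000, i.e. doff = 8 and only ACK set.
 * Any incoming header whose flag word, masked with TCP_HP_BITS,
 * differs (extra flags, different doff, changed window) falls through
 * to the slow path.
 */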
if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags &&
TCP_SKB_CB(skb)->seq == tp->rcv_nxt &&
!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) {
int tcp_header_len = tp->tcp_header_len;
/* Timestamp header prediction: tcp_header_len
* is automatically equal to th->doff*4 due to pred_flags
* match.
*/
/* Check timestamp */
if (tcp_header_len == sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) {
/* No? Slow path! */
if (!tcp_parse_aligned_timestamp(tp, th))
goto slow_path;
/* If PAWS failed, check it more carefully in slow path */
if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0)
goto slow_path;
/* DO NOT update ts_recent here: if the checksum fails
 * and the timestamp was the corrupted part, it would result
 * in a hung connection since we would drop all
 * future packets due to the PAWS test.
*/
}
if (len <= tcp_header_len) {
/* Bulk data transfer: sender */
if (len == tcp_header_len) {
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
*/
if (tcp_header_len ==
(sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
/* We know that such packets are checksummed
* on entry.
*/
tcp_ack(sk, skb, 0);
__kfree_skb(skb);
tcp_data_snd_check(sk);
return 0;
} else { /* Header too small */
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
} else {
int eaten = 0;
int copied_early = 0;
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
eaten = 1;
}
#endif
if (tp->ucopy.task == current &&
sock_owned_by_user(sk) && !copied_early) {
__set_current_state(TASK_RUNNING);
if (!tcp_copy_to_iovec(sk, skb, tcp_header_len))
eaten = 1;
}
if (eaten) {
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
*/
if (tcp_header_len ==
(sizeof(struct tcphdr) +
TCPOLEN_TSTAMP_ALIGNED) &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
tcp_rcv_rtt_measure_ts(sk, skb);
__skb_pull(skb, tcp_header_len);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
}
if (copied_early)
tcp_cleanup_rbuf(sk, skb->len);
}
if (!eaten) {
if (tcp_checksum_complete_user(sk, skb))
goto csum_error;
/* Predicted packet is in window by definition.
* seq == rcv_nxt and rcv_wup <= rcv_nxt.
* Hence, check seq<=rcv_wup reduces to:
*/
if (tcp_header_len ==
(sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED) &&
tp->rcv_nxt == tp->rcv_wup)
tcp_store_ts_recent(tp);
tcp_rcv_rtt_measure_ts(sk, skb);
if ((int)skb->truesize > sk->sk_forward_alloc)
goto step5;
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
/* Bulk data transfer: receiver */
__skb_pull(skb, tcp_header_len);
__skb_queue_tail(&sk->sk_receive_queue, skb);
skb_set_owner_r(skb, sk);
tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
}
tcp_event_data_recv(sk, skb);
if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
/* Well, only one small jumplet in fast path... */
tcp_ack(sk, skb, FLAG_DATA);
tcp_data_snd_check(sk);
if (!inet_csk_ack_scheduled(sk))
goto no_ack;
}
if (!copied_early || tp->rcv_nxt != tp->rcv_wup)
__tcp_ack_snd_check(sk, 0);
no_ack:
#ifdef CONFIG_NET_DMA
if (copied_early)
__skb_queue_tail(&sk->sk_async_wait_queue, skb);
else
#endif
if (eaten)
__kfree_skb(skb);
else
sk->sk_data_ready(sk, 0);
return 0;
}
}
slow_path:
if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
goto csum_error;
/*
* Standard slow path.
*/
res = tcp_validate_incoming(sk, skb, th, 1);
if (res <= 0)
return -res;
step5:
if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0)
goto discard;
tcp_rcv_rtt_measure_ts(sk, skb);
/* Process urgent data. */
tcp_urg(sk, skb, th);
/* step 7: process the segment text */
tcp_data_queue(sk, skb);
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
return 0;
csum_error:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
discard:
__kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL(tcp_rcv_established);
static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
const u8 *hash_location;
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_cookie_values *cvp = tp->cookie_values;
int saved_clamp = tp->rx_opt.mss_clamp;
tcp_parse_options(skb, &tp->rx_opt, &hash_location, 0);
if (th->ack) {
/* rfc793:
* "If the state is SYN-SENT then
* first check the ACK bit
* If the ACK bit is set
* If SEG.ACK =< ISS, or SEG.ACK > SND.NXT, send
* a reset (unless the RST bit is set, if so drop
* the segment and return)"
*
* We do not send data with SYN, so that RFC-correct
* test reduces to:
*/
if (TCP_SKB_CB(skb)->ack_seq != tp->snd_nxt)
goto reset_and_undo;
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
tcp_time_stamp)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
goto reset_and_undo;
}
/* Now ACK is acceptable.
*
* "If the RST bit is set
* If the ACK was acceptable then signal the user "error:
* connection reset", drop the segment, enter CLOSED state,
* delete TCB, and return."
*/
if (th->rst) {
tcp_reset(sk);
goto discard;
}
/* rfc793:
* "fifth, if neither of the SYN or RST bits is set then
* drop the segment and return."
*
* See note below!
* --ANK(990513)
*/
if (!th->syn)
goto discard_and_undo;
/* rfc793:
* "If the SYN bit is on ...
* are acceptable then ...
* (our SYN has been ACKed), change the connection
* state to ESTABLISHED..."
*/
TCP_ECN_rcv_synack(tp, th);
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tcp_ack(sk, skb, FLAG_SLOWPATH);
/* Ok.. it's good. Set up sequence numbers and
* move to established.
*/
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
* never scaled.
*/
tp->snd_wnd = ntohs(th->window);
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (!tp->rx_opt.wscale_ok) {
tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0;
tp->window_clamp = min(tp->window_clamp, 65535U);
}
if (tp->rx_opt.saw_tstamp) {
tp->rx_opt.tstamp_ok = 1;
tp->tcp_header_len =
sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
tcp_store_ts_recent(tp);
} else {
tp->tcp_header_len = sizeof(struct tcphdr);
}
if (tcp_is_sack(tp) && sysctl_tcp_fack)
tcp_enable_fack(tp);
tcp_mtup_init(sk);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
tcp_initialize_rcv_mss(sk);
/* Remember, tcp_poll() does not lock socket!
* Change state from SYN-SENT only after copied_seq
* is initialized. */
tp->copied_seq = tp->rcv_nxt;
if (cvp != NULL &&
cvp->cookie_pair_size > 0 &&
tp->rx_opt.cookie_plus > 0) {
int cookie_size = tp->rx_opt.cookie_plus
- TCPOLEN_COOKIE_BASE;
int cookie_pair_size = cookie_size
+ cvp->cookie_desired;
/* A cookie extension option was sent and returned.
* Note that each incoming SYNACK replaces the
* Responder cookie. The initial exchange is most
* fragile, as protection against spoofing relies
* entirely upon the sequence and timestamp (above).
* This replacement strategy allows the correct pair to
* pass through, while any others will be filtered via
* Responder verification later.
*/
if (sizeof(cvp->cookie_pair) >= cookie_pair_size) {
memcpy(&cvp->cookie_pair[cvp->cookie_desired],
hash_location, cookie_size);
cvp->cookie_pair_size = cookie_pair_size;
}
}
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
security_inet_conn_established(sk, skb);
/* Make sure socket is routed, for correct metrics. */
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
tcp_init_congestion_control(sk);
/* Prevent spurious tcp_cwnd_restart() on first data
* packet.
*/
tp->lsndtime = tcp_time_stamp;
tcp_init_buffer_space(sk);
if (sock_flag(sk, SOCK_KEEPOPEN))
inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp));
if (!tp->rx_opt.snd_wscale)
__tcp_fast_path_on(tp, tp->snd_wnd);
else
tp->pred_flags = 0;
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
/* Save one ACK. Data will be ready after
* several ticks, if write_pending is set.
*
* It may be deleted, but with this feature tcpdumps
* look so _wonderfully_ clever, that I was not able
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
icsk->icsk_ack.lrcvtime = tcp_time_stamp;
icsk->icsk_ack.ato = TCP_ATO_MIN;
tcp_incr_quickack(sk);
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
discard:
__kfree_skb(skb);
return 0;
} else {
tcp_send_ack(sk);
}
return -1;
}
/* No ACK in the segment */
if (th->rst) {
/* rfc793:
* "If the RST bit is set
*
* Otherwise (no ACK) drop the segment and return."
*/
goto discard_and_undo;
}
/* PAWS check. */
if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp &&
tcp_paws_reject(&tp->rx_opt, 0))
goto discard_and_undo;
if (th->syn) {
/* We see a SYN without an ACK. It is an attempt at a
 * simultaneous connect with crossed SYNs.
 * In particular, it can be a connect to self.
*/
tcp_set_state(sk, TCP_SYN_RECV);
if (tp->rx_opt.saw_tstamp) {
tp->rx_opt.tstamp_ok = 1;
tcp_store_ts_recent(tp);
tp->tcp_header_len =
sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
} else {
tp->tcp_header_len = sizeof(struct tcphdr);
}
tp->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1;
/* RFC1323: The window in SYN & SYN/ACK segments is
* never scaled.
*/
tp->snd_wnd = ntohs(th->window);
tp->snd_wl1 = TCP_SKB_CB(skb)->seq;
tp->max_window = tp->snd_wnd;
TCP_ECN_rcv_syn(tp, th);
tcp_mtup_init(sk);
tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
tcp_initialize_rcv_mss(sk);
tcp_send_synack(sk);
#if 0
/* Note, we could accept data and URG from this segment.
 * There is no obstacle to doing so.
 *
 * However, if we sometimes ignore data in ACKless segments,
 * we have no reason to accept it at other times.
 * Also, it seems the code doing this in step6 of tcp_rcv_state_process
 * is not flawless. So, discard the packet for sanity.
* Uncomment this return to process the data.
*/
return -1;
#else
goto discard;
#endif
}
/* "fifth, if neither of the SYN or RST bits is set then
* drop the segment and return."
*/
discard_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
goto discard;
reset_and_undo:
tcp_clear_options(&tp->rx_opt);
tp->rx_opt.mss_clamp = saved_clamp;
return 1;
}
/*
* This function implements the receiving procedure of RFC 793 for
* all states except ESTABLISHED and TIME_WAIT.
* It's called from both tcp_v4_rcv and tcp_v6_rcv and should be
* address independent.
*/
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int queued = 0;
int res;
tp->rx_opt.saw_tstamp = 0;
switch (sk->sk_state) {
case TCP_CLOSE:
goto discard;
case TCP_LISTEN:
if (th->ack)
return 1;
if (th->rst)
goto discard;
if (th->syn) {
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
/* Now we have several options: In theory there is
* nothing else in the frame. KA9Q has an option to
* send data with the syn, BSD accepts data with the
* syn up to the [to be] advertised window and
* Solaris 2.1 gives you a protocol error. For now
* we just ignore it, that fits the spec precisely
* and avoids incompatibilities. It would be nice in
* future to drop through and process the data.
*
* Now that TTCP is starting to be used we ought to
* queue this data.
* But, this leaves one open to an easy denial of
* service attack, and SYN cookies can't defend
* against this problem. So, we drop the data
* in the interest of security over speed unless
* it's still in use.
*/
kfree_skb(skb);
return 0;
}
goto discard;
case TCP_SYN_SENT:
queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
if (queued >= 0)
return queued;
/* Do step6 onward by hand. */
tcp_urg(sk, skb, th);
__kfree_skb(skb);
tcp_data_snd_check(sk);
return 0;
}
res = tcp_validate_incoming(sk, skb, th, 0);
if (res <= 0)
return -res;
/* step 5: check the ACK field */
if (th->ack) {
int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0;
switch (sk->sk_state) {
case TCP_SYN_RECV:
if (acceptable) {
tp->copied_seq = tp->rcv_nxt;
smp_mb();
tcp_set_state(sk, TCP_ESTABLISHED);
sk->sk_state_change(sk);
/* Note that this wakeup is only for the marginal
* crossed SYN case. Passively open sockets
* are not woken up, because sk->sk_sleep ==
* NULL and sk->sk_socket == NULL.
*/
if (sk->sk_socket)
sk_wake_async(sk,
SOCK_WAKE_IO, POLL_OUT);
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) <<
tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
/* Make sure socket is routed, for
* correct metrics.
*/
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
tcp_init_congestion_control(sk);
/* Prevent spurious tcp_cwnd_restart() on
* first data packet.
*/
tp->lsndtime = tcp_time_stamp;
tcp_mtup_init(sk);
tcp_initialize_rcv_mss(sk);
tcp_init_buffer_space(sk);
tcp_fast_path_on(tp);
} else {
return 1;
}
break;
case TCP_FIN_WAIT1:
if (tp->snd_una == tp->write_seq) {
tcp_set_state(sk, TCP_FIN_WAIT2);
sk->sk_shutdown |= SEND_SHUTDOWN;
dst_confirm(__sk_dst_get(sk));
if (!sock_flag(sk, SOCK_DEAD))
/* Wake up lingering close() */
sk->sk_state_change(sk);
else {
int tmo;
if (tp->linger2 < 0 ||
(TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
tcp_done(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
return 1;
}
tmo = tcp_fin_time(sk);
if (tmo > TCP_TIMEWAIT_LEN) {
inet_csk_reset_keepalive_timer(sk, tmo - TCP_TIMEWAIT_LEN);
} else if (th->fin || sock_owned_by_user(sk)) {
/* Bad case. We could lose such a FIN otherwise.
* It is not a big problem, but it looks confusing
* and is not such a rare event. We can still lose it now,
* if it spins in bh_lock_sock(), but it is really
* marginal case.
*/
inet_csk_reset_keepalive_timer(sk, tmo);
} else {
tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
goto discard;
}
}
}
break;
case TCP_CLOSING:
if (tp->snd_una == tp->write_seq) {
tcp_time_wait(sk, TCP_TIME_WAIT, 0);
goto discard;
}
break;
case TCP_LAST_ACK:
if (tp->snd_una == tp->write_seq) {
tcp_update_metrics(sk);
tcp_done(sk);
goto discard;
}
break;
}
} else
goto discard;
/* step 6: check the URG bit */
tcp_urg(sk, skb, th);
/* step 7: process the segment text */
switch (sk->sk_state) {
case TCP_CLOSE_WAIT:
case TCP_CLOSING:
case TCP_LAST_ACK:
if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
case TCP_FIN_WAIT1:
case TCP_FIN_WAIT2:
/* RFC 793 says to queue data in these states,
* RFC 1122 says we MUST send a reset.
* BSD 4.4 also does reset.
*/
if (sk->sk_shutdown & RCV_SHUTDOWN) {
if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
tcp_reset(sk);
return 1;
}
}
/* Fall through */
case TCP_ESTABLISHED:
tcp_data_queue(sk, skb);
queued = 1;
break;
}
/* tcp_data could move socket to TIME-WAIT */
if (sk->sk_state != TCP_CLOSE) {
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
}
if (!queued) {
discard:
__kfree_skb(skb);
}
return 0;
}
EXPORT_SYMBOL(tcp_rcv_state_process);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3846_0 |
crossvul-cpp_data_bad_5725_0 | /*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2 of the
* License.
*/
#include <linux/export.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>
#include <linux/highuid.h>
#include <linux/cred.h>
#include <linux/securebits.h>
#include <linux/keyctl.h>
#include <linux/key-type.h>
#include <keys/user-type.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
#include <linux/projid.h>
#include <linux/fs_struct.h>
static struct kmem_cache *user_ns_cachep __read_mostly;
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *map);
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
/* Start with the same capabilities as init but useless for doing
* anything as the capabilities are bound to the new user namespace.
*/
cred->securebits = SECUREBITS_DEFAULT;
cred->cap_inheritable = CAP_EMPTY_SET;
cred->cap_permitted = CAP_FULL_SET;
cred->cap_effective = CAP_FULL_SET;
cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
key_put(cred->request_key_auth);
cred->request_key_auth = NULL;
#endif
/* tgcred will be cleared in our caller because CLONE_THREAD won't be set */
cred->user_ns = user_ns;
}
/*
* Create a new user namespace, deriving the creator from the user in the
* passed credentials, and replacing that user with the new root user for the
* new namespace.
*
* This is called by copy_creds(), which will finish setting the target task's
* credentials.
*/
int create_user_ns(struct cred *new)
{
struct user_namespace *ns, *parent_ns = new->user_ns;
kuid_t owner = new->euid;
kgid_t group = new->egid;
int ret;
/*
* Verify that we cannot violate the file-access policy
* specified by the root directory, by verifying that the
* root directory is at the root of the mount namespace,
* which allows all files to be accessed.
*/
if (current_chrooted())
return -EPERM;
/* The creator needs a mapping in the parent user namespace
* or else we won't be able to reasonably tell userspace who
* created a user_namespace.
*/
if (!kuid_has_mapping(parent_ns, owner) ||
!kgid_has_mapping(parent_ns, group))
return -EPERM;
ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
return -ENOMEM;
ret = proc_alloc_inum(&ns->proc_inum);
if (ret) {
kmem_cache_free(user_ns_cachep, ns);
return ret;
}
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
ns->owner = owner;
ns->group = group;
set_cred_user_ns(new, ns);
update_mnt_policy(ns);
return 0;
}
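/*
 * Editor's usage sketch: from user space the path into create_user_ns()
 * is unshare(CLONE_NEWUSER) or clone() with that flag. A minimal,
 * hedged illustration (error handling abbreviated):
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWUSER) != 0) {
		/* EPERM if, e.g., the caller is chrooted -- see the check above. */
		perror("unshare(CLONE_NEWUSER)");
		return 1;
	}
	/* Until a uid_map is written, ids are unmapped: the overflow uid shows. */
	printf("uid in new user namespace: %d\n", (int)getuid());
	return 0;
}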
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
if (!cred)
return -ENOMEM;
*new_cred = cred;
return create_user_ns(cred);
}
void free_user_ns(struct user_namespace *ns)
{
struct user_namespace *parent;
do {
parent = ns->parent;
proc_free_inum(ns->proc_inum);
kmem_cache_free(user_ns_cachep, ns);
ns = parent;
} while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
{
unsigned idx, extents;
u32 first, last, id2;
id2 = id + count - 1;
/* Find the matching extent */
extents = map->nr_extents;
smp_read_barrier_depends();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
if (id >= first && id <= last &&
(id2 >= first && id2 <= last))
break;
}
/* Map the id or note failure */
if (idx < extents)
id = (id - first) + map->extent[idx].lower_first;
else
id = (u32) -1;
return id;
}
static u32 map_id_down(struct uid_gid_map *map, u32 id)
{
unsigned idx, extents;
u32 first, last;
/* Find the matching extent */
extents = map->nr_extents;
smp_read_barrier_depends();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].first;
last = first + map->extent[idx].count - 1;
if (id >= first && id <= last)
break;
}
/* Map the id or note failure */
if (idx < extents)
id = (id - first) + map->extent[idx].lower_first;
else
id = (u32) -1;
return id;
}
static u32 map_id_up(struct uid_gid_map *map, u32 id)
{
unsigned idx, extents;
u32 first, last;
/* Find the matching extent */
extents = map->nr_extents;
smp_read_barrier_depends();
for (idx = 0; idx < extents; idx++) {
first = map->extent[idx].lower_first;
last = first + map->extent[idx].count - 1;
if (id >= first && id <= last)
break;
}
/* Map the id or note failure */
if (idx < extents)
id = (id - first) + map->extent[idx].first;
else
id = (u32) -1;
return id;
}
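/*
 * Editor's illustration (not part of the original file): the three
 * helpers above share the same closed-interval arithmetic. A minimal
 * stand-alone model, assuming the common single extent
 * "0 100000 65536" written to /proc/<pid>/uid_map:
 */
#include <stdint.h>
#include <stdio.h>

struct demo_extent { uint32_t first, lower_first, count; };

static uint32_t demo_map_down(const struct demo_extent *e, uint32_t id)
{
	uint32_t last = e->first + e->count - 1;

	if (id >= e->first && id <= last)
		return (id - e->first) + e->lower_first;
	return (uint32_t)-1;			/* no mapping */
}

int main(void)
{
	struct demo_extent e = { 0, 100000, 65536 };

	printf("%u\n", demo_map_down(&e, 0));		/* 100000 */
	printf("%u\n", demo_map_down(&e, 1000));	/* 101000 */
	printf("%d\n", (int)demo_map_down(&e, 70000));	/* -1, outside */
	return 0;
}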
/**
* make_kuid - Map a user-namespace uid pair into a kuid.
* @ns: User namespace that the uid is in
* @uid: User identifier
*
* Maps a user-namespace uid pair into a kernel internal kuid,
* and returns that kuid.
*
* When there is no mapping defined for the user-namespace uid
* pair INVALID_UID is returned. Callers are expected to test
* for and handle INVALID_UID being returned. INVALID_UID
* may be tested for using uid_valid().
*/
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
/* Map the uid to a global kernel uid */
return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);
/**
* from_kuid - Create a uid from a kuid user-namespace pair.
* @targ: The user namespace we want a uid in.
* @kuid: The kernel internal uid to start with.
*
* Map @kuid into the user-namespace specified by @targ and
* return the resulting uid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kuid has no mapping in @targ (uid_t)-1 is returned.
*/
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
/* Map the uid from a global kernel uid */
return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);
/**
* from_kuid_munged - Create a uid from a kuid user-namespace pair.
* @targ: The user namespace we want a uid in.
* @kuid: The kernel internal uid to start with.
*
* Map @kuid into the user-namespace specified by @targ and
* return the resulting uid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kuid, from_kuid_munged never fails and always
* returns a valid uid. This makes from_kuid_munged appropriate
* for use in syscalls like stat and getuid where failing the
* system call and failing to provide a valid uid are not
* options.
*
* If @kuid has no mapping in @targ overflowuid is returned.
*/
uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
{
uid_t uid;
uid = from_kuid(targ, kuid);
if (uid == (uid_t) -1)
uid = overflowuid;
return uid;
}
EXPORT_SYMBOL(from_kuid_munged);
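/*
 * Editor's usage sketch: callers that report ids to user space (stat,
 * getuid, ...) want the munged variant so they never fail; callers that
 * must detect a missing mapping use from_kuid() and check for (uid_t)-1.
 * Schematic callers built only on the helpers defined above:
 */
static uid_t demo_report_owner(struct user_namespace *user_ns, kuid_t owner)
{
	/* Never fails: an unmapped id becomes overflowuid (usually 65534). */
	return from_kuid_munged(user_ns, owner);
}

static bool demo_owner_visible(struct user_namespace *user_ns, kuid_t owner)
{
	/* from_kuid() reports a missing mapping as (uid_t)-1. */
	return from_kuid(user_ns, owner) != (uid_t) -1;
}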
/**
* make_kgid - Map a user-namespace gid pair into a kgid.
* @ns: User namespace that the gid is in
* @gid: Group identifier
*
* Maps a user-namespace gid pair into a kernel internal kgid,
* and returns that kgid.
*
* When there is no mapping defined for the user-namespace gid
* pair INVALID_GID is returned. Callers are expected to test
* for and handle INVALID_GID being returned. INVALID_GID may be
* tested for using gid_valid().
*/
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
/* Map the gid to a global kernel gid */
return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);
/**
* from_kgid - Create a gid from a kgid user-namespace pair.
* @targ: The user namespace we want a gid in.
* @kgid: The kernel internal gid to start with.
*
* Map @kgid into the user-namespace specified by @targ and
* return the resulting gid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kgid has no mapping in @targ (gid_t)-1 is returned.
*/
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
/* Map the gid from a global kernel gid */
return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);
/**
* from_kgid_munged - Create a gid from a kgid user-namespace pair.
* @targ: The user namespace we want a gid in.
* @kgid: The kernel internal gid to start with.
*
* Map @kgid into the user-namespace specified by @targ and
* return the resulting gid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kgid, from_kgid_munged never fails and always
* returns a valid gid. This makes from_kgid_munged appropriate
* for use in syscalls like stat and getgid where failing the
* system call and failing to provide a valid gid are not options.
*
* If @kgid has no mapping in @targ overflowgid is returned.
*/
gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
{
gid_t gid;
gid = from_kgid(targ, kgid);
if (gid == (gid_t) -1)
gid = overflowgid;
return gid;
}
EXPORT_SYMBOL(from_kgid_munged);
/**
* make_kprojid - Map a user-namespace projid pair into a kprojid.
* @ns: User namespace that the projid is in
* @projid: Project identifier
*
* Maps a user-namespace projid pair into a kernel internal kprojid,
* and returns that kprojid.
*
* When there is no mapping defined for the user-namespace projid
* pair INVALID_PROJID is returned. Callers are expected to test
* for and handle INVALID_PROJID being returned. INVALID_PROJID
* may be tested for using projid_valid().
*/
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
/* Map the projid to a global kernel projid */
return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);
/**
* from_kprojid - Create a projid from a kprojid user-namespace pair.
* @targ: The user namespace we want a projid in.
* @kprojid: The kernel internal project identifier to start with.
*
* Map @kprojid into the user-namespace specified by @targ and
* return the resulting projid.
*
* There is always a mapping into the initial user_namespace.
*
* If @kprojid has no mapping in @targ (projid_t)-1 is returned.
*/
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
/* Map the projid from a global kernel projid */
return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);
/**
* from_kprojid_munged - Create a projid from a kprojid user-namespace pair.
* @targ: The user namespace we want a projid in.
* @kprojid: The kernel internal projid to start with.
*
* Map @kprojid into the user-namespace specified by @targ and
* return the resulting projid.
*
* There is always a mapping into the initial user_namespace.
*
* Unlike from_kprojid, from_kprojid_munged never fails and always
* returns a valid projid. This makes from_kprojid_munged
* appropriate for use in syscalls like stat, where failing the
* system call and failing to provide a valid projid are not
* options.
*
* If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
*/
projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
{
projid_t projid;
projid = from_kprojid(targ, kprojid);
if (projid == (projid_t) -1)
projid = OVERFLOW_PROJID;
return projid;
}
EXPORT_SYMBOL(from_kprojid_munged);
static int uid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
uid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static int gid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
gid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static int projid_m_show(struct seq_file *seq, void *v)
{
struct user_namespace *ns = seq->private;
struct uid_gid_extent *extent = v;
struct user_namespace *lower_ns;
projid_t lower;
lower_ns = seq_user_ns(seq);
if ((lower_ns == ns) && lower_ns->parent)
lower_ns = lower_ns->parent;
lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
seq_printf(seq, "%10u %10u %10u\n",
extent->first,
lower,
extent->count);
return 0;
}
static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
{
struct uid_gid_extent *extent = NULL;
loff_t pos = *ppos;
if (pos < map->nr_extents)
extent = &map->extent[pos];
return extent;
}
static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->uid_map);
}
static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->gid_map);
}
static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
struct user_namespace *ns = seq->private;
return m_start(seq, ppos, &ns->projid_map);
}
static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
(*pos)++;
return seq->op->start(seq, pos);
}
static void m_stop(struct seq_file *seq, void *v)
{
return;
}
struct seq_operations proc_uid_seq_operations = {
.start = uid_m_start,
.stop = m_stop,
.next = m_next,
.show = uid_m_show,
};
struct seq_operations proc_gid_seq_operations = {
.start = gid_m_start,
.stop = m_stop,
.next = m_next,
.show = gid_m_show,
};
struct seq_operations proc_projid_seq_operations = {
.start = projid_m_start,
.stop = m_stop,
.next = m_next,
.show = projid_m_show,
};
static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent)
{
u32 upper_first, lower_first, upper_last, lower_last;
unsigned idx;
upper_first = extent->first;
lower_first = extent->lower_first;
upper_last = upper_first + extent->count - 1;
lower_last = lower_first + extent->count - 1;
for (idx = 0; idx < new_map->nr_extents; idx++) {
u32 prev_upper_first, prev_lower_first;
u32 prev_upper_last, prev_lower_last;
struct uid_gid_extent *prev;
prev = &new_map->extent[idx];
prev_upper_first = prev->first;
prev_lower_first = prev->lower_first;
prev_upper_last = prev_upper_first + prev->count - 1;
prev_lower_last = prev_lower_first + prev->count - 1;
/* Does the upper range intersect a previous extent? */
if ((prev_upper_first <= upper_last) &&
(prev_upper_last >= upper_first))
return true;
/* Does the lower range intersect a previous extent? */
if ((prev_lower_first <= lower_last) &&
(prev_lower_last >= lower_first))
return true;
}
return false;
}
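/*
 * Editor's illustration: mappings_overlap() is a plain closed-interval
 * intersection test applied to both the upper and the lower range. The
 * core predicate, isolated:
 */
static bool demo_ranges_intersect(u32 a_first, u32 a_last,
				  u32 b_first, u32 b_last)
{
	/* Two closed ranges intersect iff neither lies wholly before the other. */
	return (a_first <= b_last) && (a_last >= b_first);
}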
static DEFINE_MUTEX(id_map_mutex);
static ssize_t map_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos,
int cap_setid,
struct uid_gid_map *map,
struct uid_gid_map *parent_map)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct uid_gid_map new_map;
unsigned idx;
struct uid_gid_extent *extent = NULL;
unsigned long page = 0;
char *kbuf, *pos, *next_line;
ssize_t ret = -EINVAL;
/*
* The id_map_mutex serializes all writes to any given map.
*
* Any map is only ever written once.
*
* An id map fits within 1 cache line on most architectures.
*
* On read nothing needs to be done unless you are on an
* architecture with a crazy cache coherency model like alpha.
*
* There is a one time data dependency between reading the
* count of the extents and the values of the extents. The
* desired behavior is to see the values of the extents that
* were written before the count of the extents.
*
* To achieve this smp_wmb() is used to guarantee the write
* order and smp_read_barrier_depends() guarantees that
* architectures with weak ordering do not return stale data.
*
*/
mutex_lock(&id_map_mutex);
ret = -EPERM;
/* Only allow one successful write to the map */
if (map->nr_extents != 0)
goto out;
/*
* Adjusting namespace settings requires capabilities on the target.
*/
if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
goto out;
/* Get a buffer */
ret = -ENOMEM;
page = __get_free_page(GFP_TEMPORARY);
kbuf = (char *) page;
if (!page)
goto out;
/* Only allow <= page size writes at the beginning of the file */
ret = -EINVAL;
if ((*ppos != 0) || (count >= PAGE_SIZE))
goto out;
/* Slurp in the user data */
ret = -EFAULT;
if (copy_from_user(kbuf, buf, count))
goto out;
kbuf[count] = '\0';
/* Parse the user data */
ret = -EINVAL;
pos = kbuf;
new_map.nr_extents = 0;
for (;pos; pos = next_line) {
extent = &new_map.extent[new_map.nr_extents];
/* Find the end of line and ensure I don't look past it */
next_line = strchr(pos, '\n');
if (next_line) {
*next_line = '\0';
next_line++;
if (*next_line == '\0')
next_line = NULL;
}
pos = skip_spaces(pos);
extent->first = simple_strtoul(pos, &pos, 10);
if (!isspace(*pos))
goto out;
pos = skip_spaces(pos);
extent->lower_first = simple_strtoul(pos, &pos, 10);
if (!isspace(*pos))
goto out;
pos = skip_spaces(pos);
extent->count = simple_strtoul(pos, &pos, 10);
if (*pos && !isspace(*pos))
goto out;
/* Verify there is no trailing junk on the line */
pos = skip_spaces(pos);
if (*pos != '\0')
goto out;
/* Verify we have been given valid starting values */
if ((extent->first == (u32) -1) ||
(extent->lower_first == (u32) -1 ))
goto out;
/* Verify count is not zero and does not cause the extent to wrap */
if ((extent->first + extent->count) <= extent->first)
goto out;
if ((extent->lower_first + extent->count) <= extent->lower_first)
goto out;
/* Do the ranges in extent overlap any previous extents? */
if (mappings_overlap(&new_map, extent))
goto out;
new_map.nr_extents++;
/* Fail if the file contains too many extents */
if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
(next_line != NULL))
goto out;
}
/* Be very certain the new map actually exists */
if (new_map.nr_extents == 0)
goto out;
ret = -EPERM;
/* Validate that the user is allowed to use the ids being mapped to. */
if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
goto out;
/* Map the lower ids from the parent user namespace to the
* kernel global id space.
*/
for (idx = 0; idx < new_map.nr_extents; idx++) {
u32 lower_first;
extent = &new_map.extent[idx];
lower_first = map_id_range_down(parent_map,
extent->lower_first,
extent->count);
/* Fail if we can not map the specified extent to
* the kernel global id space.
*/
if (lower_first == (u32) -1)
goto out;
extent->lower_first = lower_first;
}
/* Install the map */
memcpy(map->extent, new_map.extent,
new_map.nr_extents*sizeof(new_map.extent[0]));
smp_wmb();
map->nr_extents = new_map.nr_extents;
*ppos = count;
ret = count;
out:
mutex_unlock(&id_map_mutex);
if (page)
free_page(page);
return ret;
}
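/*
 * Editor's usage sketch (assumptions flagged): map_write() expects one
 * "<first> <lower_first> <count>" triple per line, delivered in a single
 * write() of less than one page, at most once per map. A minimal
 * user-space writer; the pid handling and the chosen range are examples
 * only:
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int demo_write_uid_map(pid_t child)
{
	char path[64], buf[64];
	int fd, n;

	snprintf(path, sizeof(path), "/proc/%d/uid_map", (int)child);
	/* Map uid 0 inside the namespace to uid 100000 outside, 1 id wide. */
	n = snprintf(buf, sizeof(buf), "0 100000 1\n");
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, buf, n) != n) {	/* must be one single write */
		close(fd);
		return -1;
	}
	return close(fd);
}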
ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETUID,
&ns->uid_map, &ns->parent->uid_map);
}
ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
return map_write(file, buf, size, ppos, CAP_SETGID,
&ns->gid_map, &ns->parent->gid_map);
}
ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
{
struct seq_file *seq = file->private_data;
struct user_namespace *ns = seq->private;
struct user_namespace *seq_ns = seq_user_ns(seq);
if (!ns->parent)
return -EPERM;
if ((seq_ns != ns) && (seq_ns != ns->parent))
return -EPERM;
/* Anyone can set any valid project id no capability needed */
return map_write(file, buf, size, ppos, -1,
&ns->projid_map, &ns->parent->projid_map);
}
static bool new_idmap_permitted(const struct file *file,
struct user_namespace *ns, int cap_setid,
struct uid_gid_map *new_map)
{
/* Allow mapping to your own filesystem ids */
if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
u32 id = new_map->extent[0].lower_first;
if (cap_setid == CAP_SETUID) {
kuid_t uid = make_kuid(ns->parent, id);
if (uid_eq(uid, file->f_cred->fsuid))
return true;
}
else if (cap_setid == CAP_SETGID) {
kgid_t gid = make_kgid(ns->parent, id);
if (gid_eq(gid, file->f_cred->fsgid))
return true;
}
}
/* Allow anyone to set a mapping that doesn't require privilege */
if (!cap_valid(cap_setid))
return true;
/* Allow the specified ids if we have the appropriate capability
* (CAP_SETUID or CAP_SETGID) over the parent user namespace.
* And the opener of the id file also had the appropriate capability.
*/
if (ns_capable(ns->parent, cap_setid) &&
file_ns_capable(file, ns->parent, cap_setid))
return true;
return false;
}
static void *userns_get(struct task_struct *task)
{
struct user_namespace *user_ns;
rcu_read_lock();
user_ns = get_user_ns(__task_cred(task)->user_ns);
rcu_read_unlock();
return user_ns;
}
static void userns_put(void *ns)
{
put_user_ns(ns);
}
static int userns_install(struct nsproxy *nsproxy, void *ns)
{
struct user_namespace *user_ns = ns;
struct cred *cred;
/* Don't allow gaining capabilities by reentering
* the same user namespace.
*/
if (user_ns == current_user_ns())
return -EINVAL;
/* Threaded processes may not enter a different user namespace */
if (atomic_read(&current->mm->mm_users) > 1)
return -EINVAL;
if (current->fs->users != 1)
return -EINVAL;
if (!ns_capable(user_ns, CAP_SYS_ADMIN))
return -EPERM;
cred = prepare_creds();
if (!cred)
return -ENOMEM;
put_user_ns(cred->user_ns);
set_cred_user_ns(cred, get_user_ns(user_ns));
return commit_creds(cred);
}
static unsigned int userns_inum(void *ns)
{
struct user_namespace *user_ns = ns;
return user_ns->proc_inum;
}
const struct proc_ns_operations userns_operations = {
.name = "user",
.type = CLONE_NEWUSER,
.get = userns_get,
.put = userns_put,
.install = userns_install,
.inum = userns_inum,
};
static __init int user_namespaces_init(void)
{
user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
return 0;
}
module_init(user_namespaces_init);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5725_0 |
crossvul-cpp_data_bad_3449_1 | /*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* inotify was largely rewritten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include "inotify.h"
#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int zero;
ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
{ }
};
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
__u32 mask;
/*
* every mark should accept its own ignored, care about children,
* and should receive events when the inode is unmounted
*/
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&group->notification_mutex);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&group->notification_mutex);
return ret;
}
/*
* Get an inotify_kernel_event if one exists and is small
* enough to fit in "count". Return an error pointer if
* not large enough.
*
* Called with the group->notification_mutex held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
size_t event_size = sizeof(struct inotify_event);
struct fsnotify_event *event;
if (fsnotify_notify_queue_is_empty(group))
return NULL;
event = fsnotify_peek_notify_event(group);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
if (event->name_len)
event_size += roundup(event->name_len + 1, event_size);
if (event_size > count)
return ERR_PTR(-EINVAL);
/* held the notification_mutex the whole time, so this is the
* same event we peeked above */
fsnotify_remove_notify_event(group);
return event;
}
/*
* Copy an event to user space, returning how much we copied.
*
* We already checked that the event size is smaller than the
* buffer we had in "get_one_event()" above.
*/
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf)
{
struct inotify_event inotify_event;
struct fsnotify_event_private_data *fsn_priv;
struct inotify_event_private_data *priv;
size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (!fsn_priv)
inotify_event.wd = -1;
else {
priv = container_of(fsn_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
inotify_event.wd = priv->wd;
inotify_free_event_priv(fsn_priv);
}
/*
* round up event->name_len so it is a multiple of event_size
* plus an extra byte for the terminating '\0'.
*/
if (event->name_len)
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.cookie = event->sync_cookie;
/* send the main event */
if (copy_to_user(buf, &inotify_event, event_size))
return -EFAULT;
buf += event_size;
/*
* fsnotify only stores the pathname, so here we have to send the pathname
* and then pad that pathname out to a multiple of sizeof(inotify_event)
* with zeros, which clear_user() below provides.
*/
if (name_len) {
unsigned int len_to_zero = name_len - event->name_len;
/* copy the path name */
if (copy_to_user(buf, event->file_name, event->name_len))
return -EFAULT;
buf += event->name_len;
/* fill userspace with 0's */
if (clear_user(buf, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
}
return event_size;
}
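/*
 * Editor's illustration (not part of the original file): on the user-space
 * side each record produced by copy_event_to_user() is a struct
 * inotify_event followed by "len" bytes of NUL-padded name. A minimal
 * parser for one read() buffer:
 */
#include <stdio.h>
#include <sys/inotify.h>
#include <sys/types.h>

static void demo_parse_events(const char *buf, ssize_t len)
{
	const char *p = buf;

	while (p < buf + len) {
		const struct inotify_event *ev =
			(const struct inotify_event *)p;

		printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
		       ev->len ? ev->name : "");
		/* ev->len already includes the padding added by the kernel. */
		p += sizeof(*ev) + ev->len;
	}
}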
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
group = file->private_data;
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
break;
ret = copy_event_to_user(group, kevent, buf);
fsnotify_put_event(kevent);
if (ret < 0)
break;
buf += ret;
count -= ret;
continue;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
schedule();
}
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
struct user_struct *user = group->inotify_data.user;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
struct fsnotify_event *event;
void __user *p;
int ret = -ENOTTY;
size_t send_len = 0;
group = file->private_data;
p = (void __user *) arg;
pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list) {
event = holder->event;
send_len += sizeof(struct inotify_event);
if (event->name_len)
send_len += roundup(event->name_len + 1,
sizeof(struct inotify_event));
}
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
.fasync = inotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
.llseek = noop_llseek,
};
/*
* inotify_find_inode - resolve a user-given path to a specific inode
*/
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int *last_wd,
struct inotify_inode_mark *i_mark)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
&i_mark->wd);
/* we added the mark to the idr, take a reference */
if (!ret) {
*last_wd = i_mark->wd;
fsnotify_get_mark(&i_mark->fsn_mark);
}
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *i_mark;
assert_spin_locked(idr_lock);
i_mark = idr_find(idr, wd);
if (i_mark) {
struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark *i_mark;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
i_mark = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = i_mark->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *found_i_mark = NULL;
int wd;
spin_lock(idr_lock);
wd = i_mark->wd;
/*
* does this i_mark think it is in the idr? we shouldn't get called
* if it wasn't....
*/
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/* Lets look in the idr to see if we find it */
found_i_mark = inotify_idr_find_locked(group, wd);
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/*
* We found a mark in the idr at the right wd, but it's
* not the mark we were told to remove. eparis seriously
* fucked up somewhere.
*/
if (unlikely(found_i_mark != i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
found_i_mark->fsn_mark.i.inode);
goto out;
}
/*
* One ref for being in the idr
* one ref held by the caller trying to kill us
* one ref grabbed by inotify_idr_find
*/
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
/* we can't really recover with bad ref cnting.. */
BUG();
}
do_inotify_remove_from_idr(group, i_mark);
out:
/* match the ref taken by inotify_idr_find_locked() */
if (found_i_mark)
fsnotify_put_mark(&found_i_mark->fsn_mark);
i_mark->wd = -1;
spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
struct inotify_inode_mark *i_mark;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
old_mask = fsn_mark->mask;
if (add)
fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
else
fsnotify_set_mark_mask_locked(fsn_mark, mask);
new_mask = fsn_mark->mask;
spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
}
/* return the wd */
ret = i_mark->wd;
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_i_mark))
return -ENOMEM;
fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
tmp_i_mark->fsn_mark.mask = mask;
tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
tmp_i_mark);
if (ret)
goto out_err;
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
/* return the watch descriptor for this new mark */
ret = tmp_i_mark->wd;
out_err:
/* match the ref from fsnotify_init_mark() */
fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
int ret = 0;
retry:
/* try to update an existing watch with the new arg */
ret = inotify_update_existing_watch(group, inode, arg);
/* no mark present, try to add a new one */
if (ret == -ENOENT)
ret = inotify_new_watch(group, inode, arg);
/*
* inotify_new_watch could race with another thread which did an
* inotify_new_watch between the update_existing and the add watch
* here, go back and try to update an existing mark again.
*/
if (ret == -EEXIST)
goto retry;
return ret;
}
static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
struct user_struct *user;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
user = get_current_user();
if (unlikely(atomic_read(&user->inotify_devs) >=
inotify_max_user_instances)) {
ret = -EMFILE;
goto out_free_uid;
}
/* fsnotify_obtain_group took a reference to group; we put it when we kill the file in the end */
group = inotify_new_group(user, inotify_max_queued_events);
if (IS_ERR(group)) {
ret = PTR_ERR(group);
goto out_free_uid;
}
atomic_inc(&user->inotify_devs);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret >= 0)
return ret;
fsnotify_put_group(group);
atomic_dec(&user->inotify_devs);
out_free_uid:
free_uid(user);
return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
u32, mask)
{
struct fsnotify_group *group;
struct inode *inode;
struct path path;
struct file *filp;
int ret, fput_needed;
unsigned flags = 0;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
ret = inotify_find_inode(pathname, &path, flags);
if (ret)
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
group = filp->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
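/*
 * Editor's usage sketch: the canonical caller sequence for the syscalls
 * defined in this file; "/tmp" and the event mask are examples only.
 */
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd, wd;

	fd = inotify_init1(IN_CLOEXEC);
	if (fd < 0)
		return 1;
	wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
	if (wd < 0)
		return 1;
	if (read(fd, buf, sizeof(buf)) < 0)	/* blocks until an event */
		return 1;
	inotify_rm_watch(fd, wd);
	return close(fd) ? 1 : 0;
}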
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
ret = -EINVAL;
if (unlikely(filp->f_op != &inotify_fops))
goto out;
group = filp->private_data;
ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
ret = 0;
fsnotify_destroy_mark(&i_mark->fsn_mark);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
return ret;
}
/*
* inotify_user_setup - Our initialization function. Note that we cannot return
* error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init inotify_user_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
inotify_max_user_watches = 8192;
return 0;
}
module_init(inotify_user_setup);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3449_1 |
crossvul-cpp_data_good_5367_0 | /* $OpenBSD: kex.c,v 1.127 2016/10/10 19:28:48 markus Exp $ */
/*
* Copyright (c) 2000, 2001 Markus Friedl. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "includes.h"
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef WITH_OPENSSL
#include <openssl/crypto.h>
#include <openssl/dh.h>
#endif
#include "ssh2.h"
#include "packet.h"
#include "compat.h"
#include "cipher.h"
#include "sshkey.h"
#include "kex.h"
#include "log.h"
#include "mac.h"
#include "match.h"
#include "misc.h"
#include "dispatch.h"
#include "monitor.h"
#include "ssherr.h"
#include "sshbuf.h"
#include "digest.h"
#if OPENSSL_VERSION_NUMBER >= 0x00907000L
# if defined(HAVE_EVP_SHA256)
# define evp_ssh_sha256 EVP_sha256
# else
extern const EVP_MD *evp_ssh_sha256(void);
# endif
#endif
/* prototype */
static int kex_choose_conf(struct ssh *);
static int kex_input_newkeys(int, u_int32_t, void *);
static const char *proposal_names[PROPOSAL_MAX] = {
"KEX algorithms",
"host key algorithms",
"ciphers ctos",
"ciphers stoc",
"MACs ctos",
"MACs stoc",
"compression ctos",
"compression stoc",
"languages ctos",
"languages stoc",
};
struct kexalg {
char *name;
u_int type;
int ec_nid;
int hash_alg;
};
static const struct kexalg kexalgs[] = {
#ifdef WITH_OPENSSL
{ KEX_DH1, KEX_DH_GRP1_SHA1, 0, SSH_DIGEST_SHA1 },
{ KEX_DH14_SHA1, KEX_DH_GRP14_SHA1, 0, SSH_DIGEST_SHA1 },
{ KEX_DH14_SHA256, KEX_DH_GRP14_SHA256, 0, SSH_DIGEST_SHA256 },
{ KEX_DH16_SHA512, KEX_DH_GRP16_SHA512, 0, SSH_DIGEST_SHA512 },
{ KEX_DH18_SHA512, KEX_DH_GRP18_SHA512, 0, SSH_DIGEST_SHA512 },
{ KEX_DHGEX_SHA1, KEX_DH_GEX_SHA1, 0, SSH_DIGEST_SHA1 },
#ifdef HAVE_EVP_SHA256
{ KEX_DHGEX_SHA256, KEX_DH_GEX_SHA256, 0, SSH_DIGEST_SHA256 },
#endif /* HAVE_EVP_SHA256 */
#ifdef OPENSSL_HAS_ECC
{ KEX_ECDH_SHA2_NISTP256, KEX_ECDH_SHA2,
NID_X9_62_prime256v1, SSH_DIGEST_SHA256 },
{ KEX_ECDH_SHA2_NISTP384, KEX_ECDH_SHA2, NID_secp384r1,
SSH_DIGEST_SHA384 },
# ifdef OPENSSL_HAS_NISTP521
{ KEX_ECDH_SHA2_NISTP521, KEX_ECDH_SHA2, NID_secp521r1,
SSH_DIGEST_SHA512 },
# endif /* OPENSSL_HAS_NISTP521 */
#endif /* OPENSSL_HAS_ECC */
#endif /* WITH_OPENSSL */
#if defined(HAVE_EVP_SHA256) || !defined(WITH_OPENSSL)
{ KEX_CURVE25519_SHA256, KEX_C25519_SHA256, 0, SSH_DIGEST_SHA256 },
{ KEX_CURVE25519_SHA256_OLD, KEX_C25519_SHA256, 0, SSH_DIGEST_SHA256 },
#endif /* HAVE_EVP_SHA256 || !WITH_OPENSSL */
{ NULL, -1, -1, -1},
};
char *
kex_alg_list(char sep)
{
char *ret = NULL, *tmp;
size_t nlen, rlen = 0;
const struct kexalg *k;
for (k = kexalgs; k->name != NULL; k++) {
if (ret != NULL)
ret[rlen++] = sep;
nlen = strlen(k->name);
if ((tmp = realloc(ret, rlen + nlen + 2)) == NULL) {
free(ret);
return NULL;
}
ret = tmp;
memcpy(ret + rlen, k->name, nlen + 1);
rlen += nlen;
}
return ret;
}
static const struct kexalg *
kex_alg_by_name(const char *name)
{
const struct kexalg *k;
for (k = kexalgs; k->name != NULL; k++) {
if (strcmp(k->name, name) == 0)
return k;
}
return NULL;
}
/* Validate KEX method name list */
int
kex_names_valid(const char *names)
{
char *s, *cp, *p;
if (names == NULL || strcmp(names, "") == 0)
return 0;
if ((s = cp = strdup(names)) == NULL)
return 0;
for ((p = strsep(&cp, ",")); p && *p != '\0';
(p = strsep(&cp, ","))) {
if (kex_alg_by_name(p) == NULL) {
error("Unsupported KEX algorithm \"%.100s\"", p);
free(s);
return 0;
}
}
debug3("kex names ok: [%s]", names);
free(s);
return 1;
}
/*
* Concatenate algorithm names, avoiding duplicates in the process.
* Caller must free returned string.
*/
char *
kex_names_cat(const char *a, const char *b)
{
char *ret = NULL, *tmp = NULL, *cp, *p;
size_t len;
if (a == NULL || *a == '\0')
return NULL;
if (b == NULL || *b == '\0')
return strdup(a);
if (strlen(b) > 1024*1024)
return NULL;
len = strlen(a) + strlen(b) + 2;
if ((tmp = cp = strdup(b)) == NULL ||
(ret = calloc(1, len)) == NULL) {
free(tmp);
return NULL;
}
strlcpy(ret, a, len);
for ((p = strsep(&cp, ",")); p && *p != '\0'; (p = strsep(&cp, ","))) {
if (match_list(ret, p, NULL) != NULL)
continue; /* Algorithm already present */
if (strlcat(ret, ",", len) >= len ||
strlcat(ret, p, len) >= len) {
free(tmp);
free(ret);
return NULL; /* Shouldn't happen */
}
}
free(tmp);
return ret;
}
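/*
 * Editor's usage sketch: kex_names_cat() backs the '+' syntax handled by
 * kex_assemble_names() below; names already present in the default list
 * are dropped from the user-supplied suffix. The algorithm names here
 * are examples only:
 */
static void
kex_names_cat_demo(void)
{
	char *l = kex_names_cat("curve25519-sha256,ecdh-sha2-nistp256",
	    "ecdh-sha2-nistp256,diffie-hellman-group14-sha1");

	if (l != NULL) {
		/* "curve25519-sha256,ecdh-sha2-nistp256,diffie-hellman-group14-sha1" */
		debug("%s", l);
		free(l);
	}
}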
/*
* Assemble a list of algorithms from a default list and a string from a
* configuration file. The user-provided string may begin with '+' to
* indicate that it should be appended to the default.
*/
int
kex_assemble_names(const char *def, char **list)
{
char *ret;
if (list == NULL || *list == NULL || **list == '\0') {
*list = strdup(def);
return 0;
}
if (**list != '+') {
return 0;
}
if ((ret = kex_names_cat(def, *list + 1)) == NULL)
return SSH_ERR_ALLOC_FAIL;
free(*list);
*list = ret;
return 0;
}
/* put algorithm proposal into buffer */
int
kex_prop2buf(struct sshbuf *b, char *proposal[PROPOSAL_MAX])
{
u_int i;
int r;
sshbuf_reset(b);
/*
* add a dummy cookie; the cookie will be overwritten by
* kex_send_kexinit() each time a kexinit is sent
*/
for (i = 0; i < KEX_COOKIE_LEN; i++) {
if ((r = sshbuf_put_u8(b, 0)) != 0)
return r;
}
for (i = 0; i < PROPOSAL_MAX; i++) {
if ((r = sshbuf_put_cstring(b, proposal[i])) != 0)
return r;
}
if ((r = sshbuf_put_u8(b, 0)) != 0 || /* first_kex_packet_follows */
(r = sshbuf_put_u32(b, 0)) != 0) /* uint32 reserved */
return r;
return 0;
}
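/*
 * Editor's usage sketch: the wire layout produced above is 16 cookie
 * bytes, PROPOSAL_MAX name-list strings, a first_kex_follows byte and a
 * reserved uint32 -- the SSH2_MSG_KEXINIT payload minus the packet type.
 * A hedged caller with placeholder proposal strings:
 */
static int
kex_prop2buf_demo(struct sshbuf *b)
{
	char *proposal[PROPOSAL_MAX] = {
		"curve25519-sha256",		/* KEX algorithms */
		"ssh-ed25519",			/* host key algorithms */
		"aes128-ctr", "aes128-ctr",	/* ciphers ctos, stoc */
		"hmac-sha2-256", "hmac-sha2-256", /* MACs ctos, stoc */
		"none", "none",			/* compression ctos, stoc */
		"", "",				/* languages ctos, stoc */
	};

	return kex_prop2buf(b, proposal);
}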
/* parse buffer and return algorithm proposal */
int
kex_buf2prop(struct sshbuf *raw, int *first_kex_follows, char ***propp)
{
struct sshbuf *b = NULL;
u_char v;
u_int i;
char **proposal = NULL;
int r;
*propp = NULL;
if ((proposal = calloc(PROPOSAL_MAX, sizeof(char *))) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((b = sshbuf_fromb(raw)) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = sshbuf_consume(b, KEX_COOKIE_LEN)) != 0) /* skip cookie */
goto out;
/* extract kex init proposal strings */
for (i = 0; i < PROPOSAL_MAX; i++) {
if ((r = sshbuf_get_cstring(b, &(proposal[i]), NULL)) != 0)
goto out;
debug2("%s: %s", proposal_names[i], proposal[i]);
}
/* first kex follows / reserved */
if ((r = sshbuf_get_u8(b, &v)) != 0 || /* first_kex_follows */
(r = sshbuf_get_u32(b, &i)) != 0) /* reserved */
goto out;
if (first_kex_follows != NULL)
*first_kex_follows = v;
debug2("first_kex_follows %d ", v);
debug2("reserved %u ", i);
r = 0;
*propp = proposal;
out:
if (r != 0 && proposal != NULL)
kex_prop_free(proposal);
sshbuf_free(b);
return r;
}
void
kex_prop_free(char **proposal)
{
u_int i;
if (proposal == NULL)
return;
for (i = 0; i < PROPOSAL_MAX; i++)
free(proposal[i]);
free(proposal);
}
/* ARGSUSED */
static int
kex_protocol_error(int type, u_int32_t seq, void *ctxt)
{
struct ssh *ssh = active_state; /* XXX */
int r;
error("kex protocol error: type %d seq %u", type, seq);
if ((r = sshpkt_start(ssh, SSH2_MSG_UNIMPLEMENTED)) != 0 ||
(r = sshpkt_put_u32(ssh, seq)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
return r;
return 0;
}
static void
kex_reset_dispatch(struct ssh *ssh)
{
ssh_dispatch_range(ssh, SSH2_MSG_TRANSPORT_MIN,
SSH2_MSG_TRANSPORT_MAX, &kex_protocol_error);
ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, &kex_input_kexinit);
}
static int
kex_send_ext_info(struct ssh *ssh)
{
int r;
char *algs;
if ((algs = sshkey_alg_list(0, 1, ',')) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((r = sshpkt_start(ssh, SSH2_MSG_EXT_INFO)) != 0 ||
(r = sshpkt_put_u32(ssh, 1)) != 0 ||
(r = sshpkt_put_cstring(ssh, "server-sig-algs")) != 0 ||
(r = sshpkt_put_cstring(ssh, algs)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
goto out;
/* success */
r = 0;
out:
free(algs);
return r;
}
int
kex_send_newkeys(struct ssh *ssh)
{
int r;
kex_reset_dispatch(ssh);
if ((r = sshpkt_start(ssh, SSH2_MSG_NEWKEYS)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
return r;
debug("SSH2_MSG_NEWKEYS sent");
debug("expecting SSH2_MSG_NEWKEYS");
ssh_dispatch_set(ssh, SSH2_MSG_NEWKEYS, &kex_input_newkeys);
if (ssh->kex->ext_info_c)
if ((r = kex_send_ext_info(ssh)) != 0)
return r;
return 0;
}
int
kex_input_ext_info(int type, u_int32_t seq, void *ctxt)
{
struct ssh *ssh = ctxt;
struct kex *kex = ssh->kex;
u_int32_t i, ninfo;
char *name, *val, *found;
int r;
debug("SSH2_MSG_EXT_INFO received");
ssh_dispatch_set(ssh, SSH2_MSG_EXT_INFO, &kex_protocol_error);
if ((r = sshpkt_get_u32(ssh, &ninfo)) != 0)
return r;
for (i = 0; i < ninfo; i++) {
if ((r = sshpkt_get_cstring(ssh, &name, NULL)) != 0)
return r;
if ((r = sshpkt_get_cstring(ssh, &val, NULL)) != 0) {
free(name);
return r;
}
debug("%s: %s=<%s>", __func__, name, val);
if (strcmp(name, "server-sig-algs") == 0) {
found = match_list("rsa-sha2-256", val, NULL);
if (found) {
kex->rsa_sha2 = 256;
free(found);
}
found = match_list("rsa-sha2-512", val, NULL);
if (found) {
kex->rsa_sha2 = 512;
free(found);
}
}
free(name);
free(val);
}
return sshpkt_get_end(ssh);
}
static int
kex_input_newkeys(int type, u_int32_t seq, void *ctxt)
{
struct ssh *ssh = ctxt;
struct kex *kex = ssh->kex;
int r;
debug("SSH2_MSG_NEWKEYS received");
ssh_dispatch_set(ssh, SSH2_MSG_NEWKEYS, &kex_protocol_error);
if ((r = sshpkt_get_end(ssh)) != 0)
return r;
if ((r = ssh_set_newkeys(ssh, MODE_IN)) != 0)
return r;
kex->done = 1;
sshbuf_reset(kex->peer);
/* sshbuf_reset(kex->my); */
kex->flags &= ~KEX_INIT_SENT;
free(kex->name);
kex->name = NULL;
return 0;
}
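/*
 * Illustrative note (an assumption about the surrounding packet code, not
 * part of the original source): only the inbound direction (MODE_IN) is
 * switched here, when the peer's SSH2_MSG_NEWKEYS arrives; the outbound
 * direction is switched separately by the packet layer once our own
 * NEWKEYS has been sent.
 */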
int
kex_send_kexinit(struct ssh *ssh)
{
u_char *cookie;
struct kex *kex = ssh->kex;
int r;
if (kex == NULL)
return SSH_ERR_INTERNAL_ERROR;
if (kex->flags & KEX_INIT_SENT)
return 0;
kex->done = 0;
/* generate a random cookie */
if (sshbuf_len(kex->my) < KEX_COOKIE_LEN)
return SSH_ERR_INVALID_FORMAT;
if ((cookie = sshbuf_mutable_ptr(kex->my)) == NULL)
return SSH_ERR_INTERNAL_ERROR;
arc4random_buf(cookie, KEX_COOKIE_LEN);
if ((r = sshpkt_start(ssh, SSH2_MSG_KEXINIT)) != 0 ||
(r = sshpkt_putb(ssh, kex->my)) != 0 ||
(r = sshpkt_send(ssh)) != 0)
return r;
debug("SSH2_MSG_KEXINIT sent");
kex->flags |= KEX_INIT_SENT;
return 0;
}
/* ARGSUSED */
int
kex_input_kexinit(int type, u_int32_t seq, void *ctxt)
{
struct ssh *ssh = ctxt;
struct kex *kex = ssh->kex;
const u_char *ptr;
u_int i;
size_t dlen;
int r;
debug("SSH2_MSG_KEXINIT received");
if (kex == NULL)
return SSH_ERR_INVALID_ARGUMENT;
ssh_dispatch_set(ssh, SSH2_MSG_KEXINIT, NULL);
ptr = sshpkt_ptr(ssh, &dlen);
if ((r = sshbuf_put(kex->peer, ptr, dlen)) != 0)
return r;
/* discard packet */
for (i = 0; i < KEX_COOKIE_LEN; i++)
if ((r = sshpkt_get_u8(ssh, NULL)) != 0)
return r;
for (i = 0; i < PROPOSAL_MAX; i++)
if ((r = sshpkt_get_string(ssh, NULL, NULL)) != 0)
return r;
/*
* XXX RFC4253 sec 7: "each side MAY guess" - currently no supported
* KEX method has the server move first, but a server might be using
* a custom method or one that we otherwise don't support. We should
* be prepared to remember first_kex_follows here so we can eat a
* packet later.
* XXX2 - RFC4253 is kind of ambiguous on what first_kex_follows means
* for cases where the server *doesn't* go first. I guess we should
* ignore it when it is set for these cases, which is what we do now.
*/
if ((r = sshpkt_get_u8(ssh, NULL)) != 0 || /* first_kex_follows */
(r = sshpkt_get_u32(ssh, NULL)) != 0 || /* reserved */
(r = sshpkt_get_end(ssh)) != 0)
return r;
if (!(kex->flags & KEX_INIT_SENT))
if ((r = kex_send_kexinit(ssh)) != 0)
return r;
if ((r = kex_choose_conf(ssh)) != 0)
return r;
if (kex->kex_type < KEX_MAX && kex->kex[kex->kex_type] != NULL)
return (kex->kex[kex->kex_type])(ssh);
return SSH_ERR_INTERNAL_ERROR;
}
int
kex_new(struct ssh *ssh, char *proposal[PROPOSAL_MAX], struct kex **kexp)
{
struct kex *kex;
int r;
*kexp = NULL;
if ((kex = calloc(1, sizeof(*kex))) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((kex->peer = sshbuf_new()) == NULL ||
(kex->my = sshbuf_new()) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if ((r = kex_prop2buf(kex->my, proposal)) != 0)
goto out;
kex->done = 0;
kex_reset_dispatch(ssh);
r = 0;
*kexp = kex;
out:
if (r != 0)
kex_free(kex);
return r;
}
void
kex_free_newkeys(struct newkeys *newkeys)
{
if (newkeys == NULL)
return;
if (newkeys->enc.key) {
explicit_bzero(newkeys->enc.key, newkeys->enc.key_len);
free(newkeys->enc.key);
newkeys->enc.key = NULL;
}
if (newkeys->enc.iv) {
explicit_bzero(newkeys->enc.iv, newkeys->enc.iv_len);
free(newkeys->enc.iv);
newkeys->enc.iv = NULL;
}
free(newkeys->enc.name);
explicit_bzero(&newkeys->enc, sizeof(newkeys->enc));
free(newkeys->comp.name);
explicit_bzero(&newkeys->comp, sizeof(newkeys->comp));
mac_clear(&newkeys->mac);
if (newkeys->mac.key) {
explicit_bzero(newkeys->mac.key, newkeys->mac.key_len);
free(newkeys->mac.key);
newkeys->mac.key = NULL;
}
free(newkeys->mac.name);
explicit_bzero(&newkeys->mac, sizeof(newkeys->mac));
explicit_bzero(newkeys, sizeof(*newkeys));
free(newkeys);
}
void
kex_free(struct kex *kex)
{
u_int mode;
#ifdef WITH_OPENSSL
if (kex->dh)
DH_free(kex->dh);
#ifdef OPENSSL_HAS_ECC
if (kex->ec_client_key)
EC_KEY_free(kex->ec_client_key);
#endif /* OPENSSL_HAS_ECC */
#endif /* WITH_OPENSSL */
for (mode = 0; mode < MODE_MAX; mode++) {
kex_free_newkeys(kex->newkeys[mode]);
kex->newkeys[mode] = NULL;
}
sshbuf_free(kex->peer);
sshbuf_free(kex->my);
free(kex->session_id);
free(kex->client_version_string);
free(kex->server_version_string);
free(kex->failed_choice);
free(kex->hostkey_alg);
free(kex->name);
free(kex);
}
int
kex_setup(struct ssh *ssh, char *proposal[PROPOSAL_MAX])
{
int r;
if ((r = kex_new(ssh, proposal, &ssh->kex)) != 0)
return r;
if ((r = kex_send_kexinit(ssh)) != 0) { /* we start */
kex_free(ssh->kex);
ssh->kex = NULL;
return r;
}
return 0;
}
/*
* Request key re-exchange; returns 0 on success or an ssherr.h error
* code otherwise. Must not be called if KEX is incomplete or in-progress.
*/
int
kex_start_rekex(struct ssh *ssh)
{
if (ssh->kex == NULL) {
error("%s: no kex", __func__);
return SSH_ERR_INTERNAL_ERROR;
}
if (ssh->kex->done == 0) {
error("%s: requested twice", __func__);
return SSH_ERR_INTERNAL_ERROR;
}
ssh->kex->done = 0;
return kex_send_kexinit(ssh);
}
static int
choose_enc(struct sshenc *enc, char *client, char *server)
{
char *name = match_list(client, server, NULL);
if (name == NULL)
return SSH_ERR_NO_CIPHER_ALG_MATCH;
if ((enc->cipher = cipher_by_name(name)) == NULL)
return SSH_ERR_INTERNAL_ERROR;
enc->name = name;
enc->enabled = 0;
enc->iv = NULL;
enc->iv_len = cipher_ivlen(enc->cipher);
enc->key = NULL;
enc->key_len = cipher_keylen(enc->cipher);
enc->block_size = cipher_blocksize(enc->cipher);
return 0;
}
static int
choose_mac(struct ssh *ssh, struct sshmac *mac, char *client, char *server)
{
char *name = match_list(client, server, NULL);
if (name == NULL)
return SSH_ERR_NO_MAC_ALG_MATCH;
if (mac_setup(mac, name) < 0)
return SSH_ERR_INTERNAL_ERROR;
/* truncate the key */
if (ssh->compat & SSH_BUG_HMAC)
mac->key_len = 16;
mac->name = name;
mac->key = NULL;
mac->enabled = 0;
return 0;
}
static int
choose_comp(struct sshcomp *comp, char *client, char *server)
{
char *name = match_list(client, server, NULL);
if (name == NULL)
return SSH_ERR_NO_COMPRESS_ALG_MATCH;
if (strcmp(name, "zlib@openssh.com") == 0) {
comp->type = COMP_DELAYED;
} else if (strcmp(name, "zlib") == 0) {
comp->type = COMP_ZLIB;
} else if (strcmp(name, "none") == 0) {
comp->type = COMP_NONE;
} else {
return SSH_ERR_INTERNAL_ERROR;
}
comp->name = name;
return 0;
}
static int
choose_kex(struct kex *k, char *client, char *server)
{
const struct kexalg *kexalg;
k->name = match_list(client, server, NULL);
debug("kex: algorithm: %s", k->name ? k->name : "(no match)");
if (k->name == NULL)
return SSH_ERR_NO_KEX_ALG_MATCH;
if ((kexalg = kex_alg_by_name(k->name)) == NULL)
return SSH_ERR_INTERNAL_ERROR;
k->kex_type = kexalg->type;
k->hash_alg = kexalg->hash_alg;
k->ec_nid = kexalg->ec_nid;
return 0;
}
static int
choose_hostkeyalg(struct kex *k, char *client, char *server)
{
k->hostkey_alg = match_list(client, server, NULL);
debug("kex: host key algorithm: %s",
k->hostkey_alg ? k->hostkey_alg : "(no match)");
if (k->hostkey_alg == NULL)
return SSH_ERR_NO_HOSTKEY_ALG_MATCH;
k->hostkey_type = sshkey_type_from_name(k->hostkey_alg);
if (k->hostkey_type == KEY_UNSPEC)
return SSH_ERR_INTERNAL_ERROR;
k->hostkey_nid = sshkey_ecdsa_nid_from_name(k->hostkey_alg);
return 0;
}
static int
proposals_match(char *my[PROPOSAL_MAX], char *peer[PROPOSAL_MAX])
{
static int check[] = {
PROPOSAL_KEX_ALGS, PROPOSAL_SERVER_HOST_KEY_ALGS, -1
};
int *idx;
char *p;
for (idx = &check[0]; *idx != -1; idx++) {
if ((p = strchr(my[*idx], ',')) != NULL)
*p = '\0';
if ((p = strchr(peer[*idx], ',')) != NULL)
*p = '\0';
if (strcmp(my[*idx], peer[*idx]) != 0) {
debug2("proposal mismatch: my %s peer %s",
my[*idx], peer[*idx]);
return (0);
}
}
debug2("proposals match");
return (1);
}
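/*
 * Illustrative note, not part of the original source: proposals_match()
 * above deliberately compares only the first (most-preferred) entry of the
 * kex and host key name-lists -- the condition RFC 4253 sec 7 uses to
 * decide whether a peer's guessed first kex packet can be accepted.
 */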
static int
kex_choose_conf(struct ssh *ssh)
{
struct kex *kex = ssh->kex;
struct newkeys *newkeys;
char **my = NULL, **peer = NULL;
char **cprop, **sprop;
int nenc, nmac, ncomp;
u_int mode, ctos, need, dh_need, authlen;
int r, first_kex_follows;
debug2("local %s KEXINIT proposal", kex->server ? "server" : "client");
if ((r = kex_buf2prop(kex->my, NULL, &my)) != 0)
goto out;
debug2("peer %s KEXINIT proposal", kex->server ? "client" : "server");
if ((r = kex_buf2prop(kex->peer, &first_kex_follows, &peer)) != 0)
goto out;
if (kex->server) {
cprop=peer;
sprop=my;
} else {
cprop=my;
sprop=peer;
}
/* Check whether client supports ext_info_c */
if (kex->server) {
char *ext;
ext = match_list("ext-info-c", peer[PROPOSAL_KEX_ALGS], NULL);
kex->ext_info_c = (ext != NULL);
free(ext);
}
/* Algorithm Negotiation */
if ((r = choose_kex(kex, cprop[PROPOSAL_KEX_ALGS],
sprop[PROPOSAL_KEX_ALGS])) != 0) {
kex->failed_choice = peer[PROPOSAL_KEX_ALGS];
peer[PROPOSAL_KEX_ALGS] = NULL;
goto out;
}
if ((r = choose_hostkeyalg(kex, cprop[PROPOSAL_SERVER_HOST_KEY_ALGS],
sprop[PROPOSAL_SERVER_HOST_KEY_ALGS])) != 0) {
kex->failed_choice = peer[PROPOSAL_SERVER_HOST_KEY_ALGS];
peer[PROPOSAL_SERVER_HOST_KEY_ALGS] = NULL;
goto out;
}
for (mode = 0; mode < MODE_MAX; mode++) {
if ((newkeys = calloc(1, sizeof(*newkeys))) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
kex->newkeys[mode] = newkeys;
ctos = (!kex->server && mode == MODE_OUT) ||
(kex->server && mode == MODE_IN);
nenc = ctos ? PROPOSAL_ENC_ALGS_CTOS : PROPOSAL_ENC_ALGS_STOC;
nmac = ctos ? PROPOSAL_MAC_ALGS_CTOS : PROPOSAL_MAC_ALGS_STOC;
ncomp = ctos ? PROPOSAL_COMP_ALGS_CTOS : PROPOSAL_COMP_ALGS_STOC;
if ((r = choose_enc(&newkeys->enc, cprop[nenc],
sprop[nenc])) != 0) {
kex->failed_choice = peer[nenc];
peer[nenc] = NULL;
goto out;
}
authlen = cipher_authlen(newkeys->enc.cipher);
/* ignore mac for authenticated encryption */
if (authlen == 0 &&
(r = choose_mac(ssh, &newkeys->mac, cprop[nmac],
sprop[nmac])) != 0) {
kex->failed_choice = peer[nmac];
peer[nmac] = NULL;
goto out;
}
if ((r = choose_comp(&newkeys->comp, cprop[ncomp],
sprop[ncomp])) != 0) {
kex->failed_choice = peer[ncomp];
peer[ncomp] = NULL;
goto out;
}
debug("kex: %s cipher: %s MAC: %s compression: %s",
ctos ? "client->server" : "server->client",
newkeys->enc.name,
authlen == 0 ? newkeys->mac.name : "<implicit>",
newkeys->comp.name);
}
need = dh_need = 0;
for (mode = 0; mode < MODE_MAX; mode++) {
newkeys = kex->newkeys[mode];
need = MAXIMUM(need, newkeys->enc.key_len);
need = MAXIMUM(need, newkeys->enc.block_size);
need = MAXIMUM(need, newkeys->enc.iv_len);
need = MAXIMUM(need, newkeys->mac.key_len);
dh_need = MAXIMUM(dh_need, cipher_seclen(newkeys->enc.cipher));
dh_need = MAXIMUM(dh_need, newkeys->enc.block_size);
dh_need = MAXIMUM(dh_need, newkeys->enc.iv_len);
dh_need = MAXIMUM(dh_need, newkeys->mac.key_len);
}
/* XXX need rounding? */
kex->we_need = need;
kex->dh_need = dh_need;
/* ignore the next message if the proposals do not match */
if (first_kex_follows && !proposals_match(my, peer) &&
!(ssh->compat & SSH_BUG_FIRSTKEX))
ssh->dispatch_skip_packets = 1;
r = 0;
out:
kex_prop_free(my);
kex_prop_free(peer);
return r;
}
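/*
 * Illustrative note, not part of the original source: the "ctos" test in
 * kex_choose_conf() reduces to a simple truth table -- client-to-server is
 * the client's outbound mode and the server's inbound mode:
 *
 *	kex->server	mode		ctos
 *	0 (client)	MODE_OUT	1
 *	0 (client)	MODE_IN		0
 *	1 (server)	MODE_OUT	0
 *	1 (server)	MODE_IN		1
 */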
static int
derive_key(struct ssh *ssh, int id, u_int need, u_char *hash, u_int hashlen,
const struct sshbuf *shared_secret, u_char **keyp)
{
struct kex *kex = ssh->kex;
struct ssh_digest_ctx *hashctx = NULL;
char c = id;
u_int have;
size_t mdsz;
u_char *digest;
int r;
if ((mdsz = ssh_digest_bytes(kex->hash_alg)) == 0)
return SSH_ERR_INVALID_ARGUMENT;
if ((digest = calloc(1, ROUNDUP(need, mdsz))) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
/* K1 = HASH(K || H || "A" || session_id) */
if ((hashctx = ssh_digest_start(kex->hash_alg)) == NULL ||
ssh_digest_update_buffer(hashctx, shared_secret) != 0 ||
ssh_digest_update(hashctx, hash, hashlen) != 0 ||
ssh_digest_update(hashctx, &c, 1) != 0 ||
ssh_digest_update(hashctx, kex->session_id,
kex->session_id_len) != 0 ||
ssh_digest_final(hashctx, digest, mdsz) != 0) {
r = SSH_ERR_LIBCRYPTO_ERROR;
goto out;
}
ssh_digest_free(hashctx);
hashctx = NULL;
/*
* expand key:
* Kn = HASH(K || H || K1 || K2 || ... || Kn-1)
* Key = K1 || K2 || ... || Kn
*/
for (have = mdsz; need > have; have += mdsz) {
if ((hashctx = ssh_digest_start(kex->hash_alg)) == NULL ||
ssh_digest_update_buffer(hashctx, shared_secret) != 0 ||
ssh_digest_update(hashctx, hash, hashlen) != 0 ||
ssh_digest_update(hashctx, digest, have) != 0 ||
ssh_digest_final(hashctx, digest + have, mdsz) != 0) {
r = SSH_ERR_LIBCRYPTO_ERROR;
goto out;
}
ssh_digest_free(hashctx);
hashctx = NULL;
}
#ifdef DEBUG_KEX
fprintf(stderr, "key '%c'== ", c);
dump_digest("key", digest, need);
#endif
*keyp = digest;
digest = NULL;
r = 0;
out:
free(digest);
ssh_digest_free(hashctx);
return r;
}
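/*
 * Illustrative note, not part of the original source: derive_key() above
 * is the RFC 4253 sec 7.2 expansion, with K the shared secret, H the
 * exchange hash and X one of the letters 'A'..'F':
 *
 *	K1  = HASH(K || H || X || session_id)
 *	Kn  = HASH(K || H || K1 || K2 || ... || Kn-1)
 *	key = K1 || K2 || ... truncated to "need" bytes
 */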
#define NKEYS 6
int
kex_derive_keys(struct ssh *ssh, u_char *hash, u_int hashlen,
const struct sshbuf *shared_secret)
{
struct kex *kex = ssh->kex;
u_char *keys[NKEYS];
u_int i, j, mode, ctos;
int r;
for (i = 0; i < NKEYS; i++) {
if ((r = derive_key(ssh, 'A'+i, kex->we_need, hash, hashlen,
shared_secret, &keys[i])) != 0) {
for (j = 0; j < i; j++)
free(keys[j]);
return r;
}
}
for (mode = 0; mode < MODE_MAX; mode++) {
ctos = (!kex->server && mode == MODE_OUT) ||
(kex->server && mode == MODE_IN);
kex->newkeys[mode]->enc.iv = keys[ctos ? 0 : 1];
kex->newkeys[mode]->enc.key = keys[ctos ? 2 : 3];
kex->newkeys[mode]->mac.key = keys[ctos ? 4 : 5];
}
return 0;
}
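/*
 * Illustrative note, not part of the original source: the six derived keys
 * correspond to the letters of RFC 4253 sec 7.2 -- 'A'/'B' are the
 * client-to-server/server-to-client IVs, 'C'/'D' the encryption keys and
 * 'E'/'F' the MAC keys, which is exactly the keys[ctos ? N : N + 1]
 * selection in the loop above.
 */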
#ifdef WITH_OPENSSL
int
kex_derive_keys_bn(struct ssh *ssh, u_char *hash, u_int hashlen,
const BIGNUM *secret)
{
struct sshbuf *shared_secret;
int r;
if ((shared_secret = sshbuf_new()) == NULL)
return SSH_ERR_ALLOC_FAIL;
if ((r = sshbuf_put_bignum2(shared_secret, secret)) == 0)
r = kex_derive_keys(ssh, hash, hashlen, shared_secret);
sshbuf_free(shared_secret);
return r;
}
#endif
#ifdef WITH_SSH1
int
derive_ssh1_session_id(BIGNUM *host_modulus, BIGNUM *server_modulus,
u_int8_t cookie[8], u_int8_t id[16])
{
u_int8_t hbuf[2048], sbuf[2048], obuf[SSH_DIGEST_MAX_LENGTH];
struct ssh_digest_ctx *hashctx = NULL;
size_t hlen, slen;
int r;
hlen = BN_num_bytes(host_modulus);
slen = BN_num_bytes(server_modulus);
if (hlen < (512 / 8) || (u_int)hlen > sizeof(hbuf) ||
slen < (512 / 8) || (u_int)slen > sizeof(sbuf))
return SSH_ERR_KEY_BITS_MISMATCH;
if (BN_bn2bin(host_modulus, hbuf) <= 0 ||
BN_bn2bin(server_modulus, sbuf) <= 0) {
r = SSH_ERR_LIBCRYPTO_ERROR;
goto out;
}
if ((hashctx = ssh_digest_start(SSH_DIGEST_MD5)) == NULL) {
r = SSH_ERR_ALLOC_FAIL;
goto out;
}
if (ssh_digest_update(hashctx, hbuf, hlen) != 0 ||
ssh_digest_update(hashctx, sbuf, slen) != 0 ||
ssh_digest_update(hashctx, cookie, 8) != 0 ||
ssh_digest_final(hashctx, obuf, sizeof(obuf)) != 0) {
r = SSH_ERR_LIBCRYPTO_ERROR;
goto out;
}
memcpy(id, obuf, ssh_digest_bytes(SSH_DIGEST_MD5));
r = 0;
out:
ssh_digest_free(hashctx);
explicit_bzero(hbuf, sizeof(hbuf));
explicit_bzero(sbuf, sizeof(sbuf));
explicit_bzero(obuf, sizeof(obuf));
return r;
}
#endif
#if defined(DEBUG_KEX) || defined(DEBUG_KEXDH) || defined(DEBUG_KEXECDH)
void
dump_digest(char *msg, u_char *digest, int len)
{
fprintf(stderr, "%s\n", msg);
sshbuf_dump_data(digest, len, stderr);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5367_0 |
crossvul-cpp_data_good_3486_8 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
* Copyright (C) 1995, 1996 Paul M. Antoine
* Copyright (C) 1998 Ulf Carlsson
* Copyright (C) 1999 Silicon Graphics, Inc.
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 01 MIPS Technologies, Inc.
* Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
*/
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/notifier.h>
#include <linux/kdb.h>
#include <linux/irq.h>
#include <linux/perf_event.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/cop2.h>
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/watch.h>
#include <asm/mmu_context.h>
#include <asm/types.h>
#include <asm/stacktrace.h>
#include <asm/uasm.h>
extern void check_wait(void);
extern asmlinkage void r4k_wait(void);
extern asmlinkage void rollback_handle_int(void);
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_ri_rdhwr_vivt(void);
extern asmlinkage void handle_ri_rdhwr(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long reg29)
{
unsigned long *sp = (unsigned long *)(reg29 & ~3);
unsigned long addr;
printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
printk("\n");
#endif
while (!kstack_end(sp)) {
unsigned long __user *p =
(unsigned long __user *)(unsigned long)sp++;
if (__get_user(addr, p)) {
printk(" (Bad stack address)");
break;
}
if (__kernel_text_address(addr))
print_ip_sym(addr);
}
printk("\n");
}
#ifdef CONFIG_KALLSYMS
int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
raw_show_trace = 1;
return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
#endif
static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
unsigned long ra = regs->regs[31];
unsigned long pc = regs->cp0_epc;
if (raw_show_trace || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
}
printk("Call Trace:\n");
do {
print_ip_sym(pc);
pc = unwind_stack(task, &sp, pc, &ra);
} while (pc);
printk("\n");
}
/*
* This routine abuses get_user()/put_user() to reference pointers
* with at least a bit of error checking ...
*/
static void show_stacktrace(struct task_struct *task,
const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
long stackdata;
int i;
unsigned long __user *sp = (unsigned long __user *)regs->regs[29];
printk("Stack :");
i = 0;
while ((unsigned long) sp & (PAGE_SIZE - 1)) {
if (i && ((i % (64 / field)) == 0))
printk("\n ");
if (i > 39) {
printk(" ...");
break;
}
if (__get_user(stackdata, sp++)) {
printk(" (Bad stack address)");
break;
}
printk(" %0*lx", field, stackdata);
i++;
}
printk("\n");
show_backtrace(task, regs);
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
struct pt_regs regs;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
regs.cp0_epc = 0;
} else {
if (task && task != current) {
regs.regs[29] = task->thread.reg29;
regs.regs[31] = 0;
regs.cp0_epc = task->thread.reg31;
#ifdef CONFIG_KGDB_KDB
} else if (atomic_read(&kgdb_active) != -1 &&
kdb_current_regs) {
memcpy(&regs, kdb_current_regs, sizeof(regs));
#endif /* CONFIG_KGDB_KDB */
} else {
prepare_frametrace(&regs);
}
}
show_stacktrace(task, &regs);
}
/*
* The architecture-independent dump_stack generator
*/
void dump_stack(void)
{
struct pt_regs regs;
prepare_frametrace(&regs);
show_backtrace(current, &regs);
}
EXPORT_SYMBOL(dump_stack);
static void show_code(unsigned int __user *pc)
{
long i;
unsigned short __user *pc16 = NULL;
printk("\nCode:");
if ((unsigned long)pc & 1)
pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
for(i = -3 ; i < 6 ; i++) {
unsigned int insn;
if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
printk(" (Bad address in epc)\n");
break;
}
printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
}
}
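/*
 * Illustrative note, not part of the original source: a set low bit in the
 * EPC indicates the CPU was executing in MIPS16 mode, so show_code() above
 * dumps 16-bit halfwords from the aligned address instead of 32-bit words.
 */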
static void __show_regs(const struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned int cause = regs->cp0_cause;
int i;
printk("Cpu %d\n", smp_processor_id());
/*
* Saved main processor registers
*/
for (i = 0; i < 32; ) {
if ((i % 4) == 0)
printk("$%2d :", i);
if (i == 0)
printk(" %0*lx", field, 0UL);
else if (i == 26 || i == 27)
printk(" %*s", field, "");
else
printk(" %0*lx", field, regs->regs[i]);
i++;
if ((i % 4) == 0)
printk("\n");
}
#ifdef CONFIG_CPU_HAS_SMARTMIPS
printk("Acx : %0*lx\n", field, regs->acx);
#endif
printk("Hi : %0*lx\n", field, regs->hi);
printk("Lo : %0*lx\n", field, regs->lo);
/*
* Saved cp0 registers
*/
printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
(void *) regs->cp0_epc);
printk(" %s\n", print_tainted());
printk("ra : %0*lx %pS\n", field, regs->regs[31],
(void *) regs->regs[31]);
printk("Status: %08x ", (uint32_t) regs->cp0_status);
if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
printk("IEo ");
if (regs->cp0_status & ST0_KUP)
printk("KUp ");
if (regs->cp0_status & ST0_IEP)
printk("IEp ");
if (regs->cp0_status & ST0_KUC)
printk("KUc ");
if (regs->cp0_status & ST0_IEC)
printk("IEc ");
} else {
if (regs->cp0_status & ST0_KX)
printk("KX ");
if (regs->cp0_status & ST0_SX)
printk("SX ");
if (regs->cp0_status & ST0_UX)
printk("UX ");
switch (regs->cp0_status & ST0_KSU) {
case KSU_USER:
printk("USER ");
break;
case KSU_SUPERVISOR:
printk("SUPERVISOR ");
break;
case KSU_KERNEL:
printk("KERNEL ");
break;
default:
printk("BAD_MODE ");
break;
}
if (regs->cp0_status & ST0_ERL)
printk("ERL ");
if (regs->cp0_status & ST0_EXL)
printk("EXL ");
if (regs->cp0_status & ST0_IE)
printk("IE ");
}
printk("\n");
printk("Cause : %08x\n", cause);
cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
if (1 <= cause && cause <= 5)
printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
printk("PrId : %08x (%s)\n", read_c0_prid(),
cpu_name_string());
}
/*
* FIXME: really the generic show_regs should take a const pointer argument.
*/
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
}
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
__show_regs(regs);
print_modules();
printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
current->comm, current->pid, current_thread_info(), current,
field, current_thread_info()->tp_value);
if (cpu_has_userlocal) {
unsigned long tls;
tls = read_c0_userlocal();
if (tls != current_thread_info()->tp_value)
printk("*HwTLS: %0*lx\n", field, tls);
}
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
}
static int regs_to_trapnr(struct pt_regs *regs)
{
return (regs->cp0_cause >> 2) & 0x1f;
}
static DEFINE_SPINLOCK(die_lock);
void __noreturn die(const char *str, struct pt_regs *regs)
{
static int die_counter;
int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
sig = 0;
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops) {
printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
ssleep(5);
panic("Fatal exception");
}
do_exit(sig);
}
extern struct exception_table_entry __start___dbe_table[];
extern struct exception_table_entry __stop___dbe_table[];
__asm__(
" .section __dbe_table, \"a\"\n"
" .previous \n");
/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
const struct exception_table_entry *e;
e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
if (!e)
e = search_module_dbetables(addr);
return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
const struct exception_table_entry *fixup = NULL;
int data = regs->cp0_cause & 4;
int action = MIPS_BE_FATAL;
/* XXX For now. Fixme, this searches the wrong table ... */
if (data && !user_mode(regs))
fixup = search_dbe_tables(exception_epc(regs));
if (fixup)
action = MIPS_BE_FIXUP;
if (board_be_handler)
action = board_be_handler(regs, fixup != NULL);
switch (action) {
case MIPS_BE_DISCARD:
return;
case MIPS_BE_FIXUP:
if (fixup) {
regs->cp0_epc = fixup->nextinsn;
return;
}
break;
default:
break;
}
/*
* Assume it would be too dangerous to continue ...
*/
printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
data ? "Data" : "Instruction",
field, regs->cp0_epc, field, regs->regs[31]);
if (notify_die(DIE_OOPS, "bus error", regs, 0, regs_to_trapnr(regs), SIGBUS)
== NOTIFY_STOP)
return;
die_if_kernel("Oops", regs);
force_sig(SIGBUS, current);
}
/*
* ll/sc, rdhwr, sync emulation
*/
#define OPCODE 0xfc000000
#define BASE 0x03e00000
#define RT 0x001f0000
#define OFFSET 0x0000ffff
#define LL 0xc0000000
#define SC 0xe0000000
#define SPEC0 0x00000000
#define SPEC3 0x7c000000
#define RD 0x0000f800
#define FUNC 0x0000003f
#define SYNC 0x0000000f
#define RDHWR 0x0000003b
/*
* The ll_bit is cleared by r*_switch.S
*/
unsigned int ll_bit;
struct task_struct *ll_task;
static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
unsigned long value, __user *vaddr;
long offset;
/*
* analyse the ll instruction that just caused a ri exception
* and compute the referenced address.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
if ((unsigned long)vaddr & 3)
return SIGBUS;
if (get_user(value, vaddr))
return SIGSEGV;
preempt_disable();
if (ll_task == NULL || ll_task == current) {
ll_bit = 1;
} else {
ll_bit = 0;
}
ll_task = current;
preempt_enable();
regs->regs[(opcode & RT) >> 16] = value;
return 0;
}
static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
unsigned long __user *vaddr;
unsigned long reg;
long offset;
/*
* analyse the sc instruction that just caused a ri exception
* and compute the referenced address.
*/
/* sign extend offset */
offset = opcode & OFFSET;
offset <<= 16;
offset >>= 16;
vaddr = (unsigned long __user *)
((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
reg = (opcode & RT) >> 16;
if ((unsigned long)vaddr & 3)
return SIGBUS;
preempt_disable();
if (ll_bit == 0 || ll_task != current) {
regs->regs[reg] = 0;
preempt_enable();
return 0;
}
preempt_enable();
if (put_user(regs->regs[reg], vaddr))
return SIGSEGV;
regs->regs[reg] = 1;
return 0;
}
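/*
 * Illustrative note, not part of the original source: with a 32-bit "long"
 * the shift pair used in simulate_ll()/simulate_sc() sign-extends the
 * 16-bit immediate field, e.g.:
 *
 *	offset = 0x8004;	bit 15 set
 *	offset <<= 16;		0x80040000, sign bit now in bit 31
 *	offset >>= 16;		arithmetic shift: 0xffff8004 == -32764
 */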
/*
* ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
* opcodes are supposed to result in coprocessor unusable exceptions if
* executed on ll/sc-less processors. That's the theory. In practice a
* few processors such as NEC's VR4100 throw reserved instruction exceptions
* instead, so we're doing the emulation thing in both exception handlers.
*/
static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == LL) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_ll(regs, opcode);
}
if ((opcode & OPCODE) == SC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return simulate_sc(regs, opcode);
}
return -1; /* Must be something else ... */
}
/*
* Simulate trapping 'rdhwr' instructions to provide user accessible
* registers not implemented in hardware.
*/
static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
{
struct thread_info *ti = task_thread_info(current);
if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
int rd = (opcode & RD) >> 11;
int rt = (opcode & RT) >> 16;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
case 0: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
case 1: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
case 2: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
case 3: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
regs->regs[rt] = 1;
break;
default:
regs->regs[rt] = 2;
}
return 0;
case 29:
regs->regs[rt] = ti->tp_value;
return 0;
default:
return -1;
}
}
/* Not ours. */
return -1;
}
static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
return 0;
}
return -1; /* Must be something else ... */
}
asmlinkage void do_ov(struct pt_regs *regs)
{
siginfo_t info;
die_if_kernel("Integer overflow", regs);
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static int process_fpemu_return(int sig, void __user *fault_addr)
{
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
si.si_signo = sig;
if (sig == SIGSEGV) {
if (find_vma(current->mm, (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
} else {
si.si_code = BUS_ADRERR;
}
force_sig_info(sig, &si, current);
return 1;
} else if (sig) {
force_sig(sig, current);
return 1;
} else {
return 0;
}
}
/*
* XXX Delayed fp exceptions when doing a lazy ctx switch XXX
*/
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
siginfo_t info = {0};
if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs), SIGFPE)
== NOTIFY_STOP)
return;
die_if_kernel("FP exception in kernel code", regs);
if (fcr31 & FPU_CSR_UNI_X) {
int sig;
void __user *fault_addr = NULL;
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
*
* Force FPU to dump state into task/thread context. We're
* moving a lot of data here for what is probably a single
* instruction, but the alternative is to pre-decode the FP
* register operands before invoking the emulator, which seems
* a bit extreme for what should be an infrequent event.
*/
/* Ensure 'resume' not overwrite saved fp context again. */
lose_fpu(1);
/* Run the emulator */
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
/*
* We can't allow the emulated instruction to leave any of
* the cause bit set in $fcr31.
*/
current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
/* If something went wrong, signal */
process_fpemu_return(sig, fault_addr);
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
else if (fcr31 & FPU_CSR_DIV_X)
info.si_code = FPE_FLTDIV;
else if (fcr31 & FPU_CSR_OVF_X)
info.si_code = FPE_FLTOVF;
else if (fcr31 & FPU_CSR_UDF_X)
info.si_code = FPE_FLTUND;
else if (fcr31 & FPU_CSR_INE_X)
info.si_code = FPE_FLTRES;
else
info.si_code = __SI_FAULT;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
}
static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
siginfo_t info;
char b[40];
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
if (notify_die(DIE_TRAP, str, regs, code, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
/*
* A short test says that IRIX 5.3 sends SIGTRAP for all trap
* insns, even for trap and break codes that indicate arithmetic
* failures. Weird ...
* But should we continue the brokenness??? --macro
*/
switch (code) {
case BRK_OVERFLOW:
case BRK_DIVZERO:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
if (code == BRK_DIVZERO)
info.si_code = FPE_INTDIV;
else
info.si_code = FPE_INTOVF;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void __user *) regs->cp0_epc;
force_sig_info(SIGFPE, &info, current);
break;
case BRK_BUG:
die_if_kernel("Kernel bug detected", regs);
force_sig(SIGTRAP, current);
break;
case BRK_MEMU:
/*
* Address errors may be deliberately induced by the FPU
* emulator to retake control of the CPU after executing the
* instruction in the delay slot of an emulated branch.
*
* Terminate if the exception was recognized as a delay slot return;
* otherwise handle as normal.
*/
if (do_dsemulret(regs))
return;
die_if_kernel("Math emu break/trap", regs);
force_sig(SIGTRAP, current);
break;
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
force_sig(SIGTRAP, current);
}
}
asmlinkage void do_bp(struct pt_regs *regs)
{
unsigned int opcode, bcode;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/*
* There is an ancient bug in MIPS assemblers: the break code is placed
* starting at bit 16 instead of bit 6 in the opcode.
* Gas is bug-compatible, but not always, grrr...
* We handle both cases with a simple heuristics. --macro
*/
bcode = ((opcode >> 6) & ((1 << 20) - 1));
if (bcode >= (1 << 10))
bcode >>= 10;
/*
* notify the kprobe handlers, if instruction is likely to
* pertain to them.
*/
switch (bcode) {
case BRK_KPROBE_BP:
if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
case BRK_KPROBE_SSTEPBP:
if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
return;
else
break;
default:
break;
}
do_trap_or_bp(regs, bcode, "Break");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
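/*
 * Illustrative note, not part of the original source: the heuristic in
 * do_bp() accepts both encodings of, say, "break 7":
 *
 *	correct encoding:	code in bits 25..6  -> bcode = 7 (< 1 << 10)
 *	buggy assembler:	code in bits 25..16 -> bcode = 7 << 10,
 *				then bcode >>= 10 recovers 7
 */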
asmlinkage void do_tr(struct pt_regs *regs)
{
unsigned int opcode, tcode = 0;
if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
goto out_sigsegv;
/* Immediate versions don't provide a code. */
if (!(opcode & OPCODE))
tcode = ((opcode >> 6) & ((1 << 10) - 1));
do_trap_or_bp(regs, tcode, "Trap");
return;
out_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_ri(struct pt_regs *regs)
{
unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
unsigned long old_epc = regs->cp0_epc;
unsigned int opcode = 0;
int status = -1;
if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
die_if_kernel("Reserved instruction in kernel code", regs);
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = simulate_sync(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
}
/*
* MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
* emulated more than some threshold number of instructions, force migration to
* a "CPU" that has FP support.
*/
static void mt_ase_fp_affinity(void)
{
#ifdef CONFIG_MIPS_MT_FPAFF
if (mt_fpemul_threshold > 0 &&
((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
/*
* If there's no FPU present, or if the application has already
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
cpumask_t tmask;
current->thread.user_cpus_allowed
= current->cpus_allowed;
cpus_and(tmask, current->cpus_allowed,
mt_fpu_cpumask);
set_cpus_allowed_ptr(current, &tmask);
set_thread_flag(TIF_FPUBOUND);
}
}
#endif /* CONFIG_MIPS_MT_FPAFF */
}
/*
* No lock; only written during early bootup by CPU 0.
*/
static RAW_NOTIFIER_HEAD(cu2_chain);
int __ref register_cu2_notifier(struct notifier_block *nb)
{
return raw_notifier_chain_register(&cu2_chain, nb);
}
int cu2_notifier_call_chain(unsigned long val, void *v)
{
return raw_notifier_call_chain(&cu2_chain, val, v);
}
static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
void *data)
{
struct pt_regs *regs = data;
switch (action) {
default:
die_if_kernel("Unhandled kernel unaligned access or invalid "
"instruction", regs);
/* Fall through */
case CU2_EXCEPTION:
force_sig(SIGILL, current);
}
return NOTIFY_OK;
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
unsigned long old_epc;
unsigned int opcode;
unsigned int cpid;
int status;
unsigned long __maybe_unused flags;
die_if_kernel("do_cpu invoked from kernel context!", regs);
cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
switch (cpid) {
case 0:
epc = (unsigned int __user *)exception_epc(regs);
old_epc = regs->cp0_epc;
opcode = 0;
status = -1;
if (unlikely(compute_return_epc(regs) < 0))
return;
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
if (status < 0)
status = simulate_rdhwr(regs, opcode);
if (status < 0)
status = SIGILL;
if (unlikely(status > 0)) {
regs->cp0_epc = old_epc; /* Undo skip-over. */
force_sig(status, current);
}
return;
case 1:
if (used_math()) /* Using the FPU again. */
own_fpu(1);
else { /* First time FPU user. */
init_fpu();
set_used_math();
}
if (!raw_cpu_has_fpu) {
int sig;
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu,
0, &fault_addr);
if (!process_fpemu_return(sig, fault_addr))
mt_ase_fp_affinity();
}
return;
case 2:
raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
return;
case 3:
break;
}
force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
force_sig(SIGILL, current);
}
/*
* Called with interrupts disabled.
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
u32 cause;
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
cause = read_c0_cause();
cause &= ~(1 << 22);
write_c0_cause(cause);
/*
* If the current thread has the watch registers loaded, save
* their values and send SIGTRAP. Otherwise another thread
* left the registers set, clear them and continue.
*/
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
force_sig(SIGTRAP, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
}
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
show_regs(regs);
if (multi_match) {
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
printk("\n");
dump_tlb_all();
}
show_code((unsigned int __user *) regs->cp0_epc);
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
*/
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
int subcode;
subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
>> VPECONTROL_EXCPT_SHIFT;
switch (subcode) {
case 0:
printk(KERN_DEBUG "Thread Underflow\n");
break;
case 1:
printk(KERN_DEBUG "Thread Overflow\n");
break;
case 2:
printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
break;
case 3:
printk(KERN_DEBUG "Gating Storage Exception\n");
break;
case 4:
printk(KERN_DEBUG "YIELD Scheduler Exception\n");
break;
case 5:
printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
break;
default:
printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
subcode);
break;
}
die_if_kernel("MIPS MT Thread exception in kernel", regs);
force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
if (cpu_has_dsp)
panic("Unexpected DSP exception\n");
force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
/*
* Game over - no way to handle this if it ever occurs. Most probably
* caused by a new unknown cpu type or after another deadly
* hard/software error.
*/
show_regs(regs);
panic("Caught reserved exception %ld - should not happen.",
(regs->cp0_cause & 0x7f) >> 2);
}
static int __initdata l1parity = 1;
static int __init nol1parity(char *s)
{
l1parity = 0;
return 1;
}
__setup("nol1par", nol1parity);
static int __initdata l2parity = 1;
static int __init nol2parity(char *s)
{
l2parity = 0;
return 1;
}
__setup("nol2par", nol2parity);
/*
* Some MIPS CPUs can enable/disable cache parity detection, but do
* it in different ways.
*/
static inline void parity_protection_init(void)
{
switch (current_cpu_type()) {
case CPU_24K:
case CPU_34K:
case CPU_74K:
case CPU_1004K:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
unsigned long errctl;
unsigned int l1parity_present, l2parity_present;
errctl = read_c0_ecc();
errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
/* probe L1 parity support */
write_c0_ecc(errctl | ERRCTL_PE);
back_to_back_c0_hazard();
l1parity_present = (read_c0_ecc() & ERRCTL_PE);
/* probe L2 parity support */
write_c0_ecc(errctl|ERRCTL_L2P);
back_to_back_c0_hazard();
l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
if (l1parity_present && l2parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
if (l1parity ^ l2parity)
errctl |= ERRCTL_L2P;
} else if (l1parity_present) {
if (l1parity)
errctl |= ERRCTL_PE;
} else if (l2parity_present) {
if (l2parity)
errctl |= ERRCTL_L2P;
} else {
/* No parity available */
}
printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
write_c0_ecc(errctl);
back_to_back_c0_hazard();
errctl = read_c0_ecc();
printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
if (l1parity_present)
printk(KERN_INFO "Cache parity protection %sabled\n",
(errctl & ERRCTL_PE) ? "en" : "dis");
if (l2parity_present) {
if (l1parity_present && l1parity)
errctl ^= ERRCTL_L2P;
printk(KERN_INFO "L2 cache parity protection %sabled\n",
(errctl & ERRCTL_L2P) ? "en" : "dis");
}
}
break;
case CPU_5KC:
write_c0_ecc(0x80000000);
back_to_back_c0_hazard();
/* Set the PE bit (bit 31) in the c0_errctl register. */
printk(KERN_INFO "Cache parity protection %sabled\n",
(read_c0_ecc() & 0x80000000) ? "en" : "dis");
break;
case CPU_20KC:
case CPU_25KF:
/* Clear the DE bit (bit 16) in the c0_status register. */
printk(KERN_INFO "Enable cache parity protection for "
"MIPS 20KC/25KF CPUs.\n");
clear_c0_status(ST0_DE);
break;
default:
break;
}
}
asmlinkage void cache_parity_error(void)
{
const int field = 2 * sizeof(unsigned long);
unsigned int reg_val;
/* For the moment, report the problem and hang. */
printk("Cache error exception:\n");
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
reg_val & (1<<30) ? "secondary" : "primary",
reg_val & (1<<31) ? "data" : "insn");
printk("Error bits: %s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
reg_val & (1<<24) ? "EI " : "",
reg_val & (1<<23) ? "E1 " : "",
reg_val & (1<<22) ? "E0 " : "");
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
if (reg_val & (1<<22))
printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
if (reg_val & (1<<23))
printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif
panic("Can't handle the cache error!");
}
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
*/
void ejtag_exception_handler(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
unsigned long depc, old_epc;
unsigned int debug;
printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
depc = read_c0_depc();
debug = read_c0_debug();
printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
if (debug & 0x80000000) {
/*
* In branch delay slot.
* We cheat a little bit here and use EPC to calculate the
* debug return address (DEPC). EPC is restored after the
* calculation.
*/
old_epc = regs->cp0_epc;
regs->cp0_epc = depc;
__compute_return_epc(regs);
depc = regs->cp0_epc;
regs->cp0_epc = old_epc;
} else
depc += 4;
write_c0_depc(depc);
#if 0
printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
write_c0_debug(debug | 0x100);
#endif
}
/*
* NMI exception handler.
*/
NORET_TYPE void ATTRIB_NORET nmi_exception_handler(struct pt_regs *regs)
{
bust_spinlocks(1);
printk("NMI taken!!!!\n");
die("NMI", regs);
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
void __init *set_except_vector(int n, void *addr)
{
unsigned long handler = (unsigned long) addr;
unsigned long old_handler = exception_handlers[n];
exception_handlers[n] = handler;
if (n == 0 && cpu_has_divec) {
unsigned long jump_mask = ~((1 << 28) - 1);
u32 *buf = (u32 *)(ebase + 0x200);
unsigned int k0 = 26;
if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
uasm_i_j(&buf, handler & ~jump_mask);
uasm_i_nop(&buf);
} else {
UASM_i_LA(&buf, k0, handler);
uasm_i_jr(&buf, k0);
uasm_i_nop(&buf);
}
local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
}
return (void *)old_handler;
}
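/*
 * Illustrative note, not part of the original source: a MIPS "j" encodes
 * only the low 28 bits of its target, so set_except_vector() can patch a
 * direct jump only when the handler lies in the same 256MB segment as the
 * vector at ebase + 0x200; otherwise it loads the full address into k0
 * ($26) and branches with "jr".
 */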
static asmlinkage void do_default_vi(void)
{
show_regs(get_irq_regs());
panic("Caught unexpected vectored interrupt.");
}
static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
{
unsigned long handler;
unsigned long old_handler = vi_handlers[n];
int srssets = current_cpu_data.srsets;
u32 *w;
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
srs = 0;
} else
handler = (unsigned long) addr;
vi_handlers[n] = (unsigned long) addr;
b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
if (srs >= srssets)
panic("Shadow register set %d not supported", srs);
if (cpu_has_veic) {
if (board_bind_eic_interrupt)
board_bind_eic_interrupt(n, srs);
} else if (cpu_has_vint) {
/* SRSMap is only defined if shadow sets are implemented */
if (srssets > 1)
change_c0_srsmap(0xf << n*4, srs << n*4);
}
if (srs == 0) {
/*
* If no shadow set is selected then use the default handler
* that does normal register saving and a standard interrupt exit
*/
extern char except_vec_vi, except_vec_vi_lui;
extern char except_vec_vi_ori, except_vec_vi_end;
extern char rollback_except_vec_vi;
char *vec_start = (cpu_wait == r4k_wait) ?
&rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif /* CONFIG_MIPS_MT_SMTC */
const int handler_len = &except_vec_vi_end - vec_start;
const int lui_offset = &except_vec_vi_lui - vec_start;
const int ori_offset = &except_vec_vi_ori - vec_start;
if (handler_len > VECTORSPACING) {
/*
* Sigh... panicking won't help as the console
* is probably not configured :(
*/
panic("VECTORSPACING too small");
}
memcpy(b, vec_start, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
w = (u32 *)(b + mori_offset);
*w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
w = (u32 *)(b + lui_offset);
*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
w = (u32 *)(b + ori_offset);
*w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+handler_len));
}
else {
/*
* In other cases jump directly to the interrupt handler
*
* It is the handler's responsibility to save registers if required
* (e.g. hi/lo) and return from the exception using "eret"
*/
w = (u32 *)b;
*w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */
*w = 0;
local_flush_icache_range((unsigned long)b,
(unsigned long)(b+8));
}
return (void *)old_handler;
}
void *set_vi_handler(int n, vi_handler_t addr)
{
return set_vi_srs_handler(n, addr, 0);
}
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
/*
* Timer interrupt
*/
int cp0_compare_irq;
int cp0_compare_irq_shift;
/*
* Performance counter IRQ or -1 if shared with timer
*/
int cp0_perfcount_irq;
EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
static int __cpuinitdata noulri;
static int __init ulri_disable(char *s)
{
pr_info("Disabling ulri\n");
noulri = 1;
return 1;
}
__setup("noulri", ulri_disable);
void __cpuinit per_cpu_trap_init(void)
{
unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for the first TC of each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Disable coprocessors and select 32-bit or 64-bit addressing
* and the 16/32 or 32/32 FPR register model. Reset the BEV
* flag that some firmware may have left set and the TS bit (for
* IP27). Set XX for ISA IV code to work.
*/
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
if (cpu_has_mips_r2)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
hwrena |= (1 << 29);
if (hwrena)
write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
change_c0_intctl(0x3e0, VECTORSPACING);
}
if (cpu_has_divec) {
if (cpu_has_mipsmt) {
unsigned int vpflags = dvpe();
set_c0_cause(CAUSEF_IV);
evpe(vpflags);
} else
set_c0_cause(CAUSEF_IV);
}
/*
* Before R2 both interrupt numbers were fixed to 7, so on R2 only:
*
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
if (cpu_has_mips_r2) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
if (cp0_perfcount_irq == cp0_compare_irq)
cp0_perfcount_irq = -1;
} else {
cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
cp0_compare_irq_shift = cp0_compare_irq;
cp0_perfcount_irq = -1;
}
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_cache_init();
tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* The first TC in a non-boot VPE must do a subset of tlb_init()
* for the MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP();
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
memcpy((void *)(ebase + offset), addr, size);
local_flush_icache_range(ebase + offset, ebase + offset + size);
}
static char panic_null_cerr[] __cpuinitdata =
"Trying to set NULL cache error exception handler";
/*
* Install uncached CPU exception handler.
* This is suitable only for the cache error exception which is the only
* exception handler that is being run uncached.
*/
void __cpuinit set_uncached_handler(unsigned long offset, void *addr,
unsigned long size)
{
unsigned long uncached_ebase = CKSEG1ADDR(ebase);
if (!addr)
panic(panic_null_cerr);
memcpy((void *)(uncached_ebase + offset), addr, size);
}
static int __initdata rdhwr_noopt;
static int __init set_rdhwr_noopt(char *str)
{
rdhwr_noopt = 1;
return 1;
}
__setup("rdhwr_noopt", set_rdhwr_noopt);
void __init trap_init(void)
{
extern char except_vec3_generic, except_vec3_r4000;
extern char except_vec4;
unsigned long i;
int rollback;
check_wait();
rollback = (cpu_wait == r4k_wait);
#if defined(CONFIG_KGDB)
if (kgdb_early_setup)
return; /* Already done */
#endif
if (cpu_has_veic || cpu_has_vint) {
unsigned long size = 0x200 + VECTORSPACING*64;
ebase = (unsigned long)
__alloc_bootmem(size, 1 << fls(size), 0);
} else {
ebase = CKSEG0;
if (cpu_has_mips_r2)
ebase += (read_c0_ebase() & 0x3ffff000);
}
per_cpu_trap_init();
/*
* Copy the generic exception handlers to their final destination.
* This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
/*
* Setup default vectors
*/
for (i = 0; i <= 31; i++)
set_except_vector(i, handle_reserved);
/*
* Copy the EJTAG debug exception vector handler code to its final
* destination.
*/
if (cpu_has_ejtag && board_ejtag_handler_setup)
board_ejtag_handler_setup();
/*
* Only some CPUs have the watch exceptions.
*/
if (cpu_has_watch)
set_except_vector(23, handle_watch);
/*
* Initialise interrupt handlers
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, NULL);
}
else if (cpu_has_divec)
set_handler(0x200, &except_vec4, 0x8);
/*
* Some CPUs can enable/disable cache parity detection, but do it in
* different ways.
*/
parity_protection_init();
/*
* The Data Bus Errors / Instruction Bus Errors are signaled
* by external hardware. Therefore these two exceptions
* may have board specific handlers.
*/
if (board_be_init)
board_be_init();
set_except_vector(0, rollback ? rollback_handle_int : handle_int);
set_except_vector(1, handle_tlbm);
set_except_vector(2, handle_tlbl);
set_except_vector(3, handle_tlbs);
set_except_vector(4, handle_adel);
set_except_vector(5, handle_ades);
set_except_vector(6, handle_ibe);
set_except_vector(7, handle_dbe);
set_except_vector(8, handle_sys);
set_except_vector(9, handle_bp);
set_except_vector(10, rdhwr_noopt ? handle_ri :
(cpu_has_vtag_icache ?
handle_ri_rdhwr_vivt : handle_ri_rdhwr));
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
/*
* The R6000 is the only R-series CPU that features a machine
* check exception (similar to the R4000 cache error) and
* unaligned ldc1/sdc1 exception. The handlers have not been
* written yet. Well, anyway there is no R6000 machine on the
* current list of targets for Linux/MIPS.
* (Duh, crap, there is someone with a triple R6k machine)
*/
//set_except_vector(14, handle_mc);
//set_except_vector(15, handle_ndc);
}
if (board_nmi_handler_setup)
board_nmi_handler_setup();
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
set_except_vector(24, handle_mcheck);
if (cpu_has_mipsmt)
set_except_vector(25, handle_mt);
set_except_vector(26, handle_dsp);
if (cpu_has_vce)
/* Special exception: R4[04]00 uses also the divec space. */
memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
else if (cpu_has_4kex)
memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80);
else
memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80);
local_flush_icache_range(ebase, ebase + 0x400);
flush_tlb_handlers();
sort_extable(__start___dbe_table, __stop___dbe_table);
cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
}
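/*
 * Reader's note (added for orientation, mirroring the MIPS Cause
 * register ExcCode encoding): the vector numbers passed to
 * set_except_vector() above are architectural ExcCode values, e.g.
 * 0 = interrupt, 1 = TLB modified, 2/3 = TLB load/store refill,
 * 4/5 = address error on load/store, 8 = syscall, 13 = trap.
 */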
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_8 |
crossvul-cpp_data_bad_2297_1 | /*
* Copyright (c) Christos Zoulas 2003.
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include "file.h"
#ifndef lint
FILE_RCSID("@(#)$File: funcs.c,v 1.72 2014/05/14 23:15:42 christos Exp $")
#endif /* lint */
#include "magic.h"
#include <assert.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#if defined(HAVE_WCHAR_H)
#include <wchar.h>
#endif
#if defined(HAVE_WCTYPE_H)
#include <wctype.h>
#endif
#if defined(HAVE_LIMITS_H)
#include <limits.h>
#endif
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t)~0)
#endif
/*
* Like printf, only we append to a buffer.
*/
protected int
file_vprintf(struct magic_set *ms, const char *fmt, va_list ap)
{
int len;
char *buf, *newstr;
if (ms->event_flags & EVENT_HAD_ERR)
return 0;
len = vasprintf(&buf, fmt, ap);
if (len < 0)
goto out;
if (ms->o.buf != NULL) {
len = asprintf(&newstr, "%s%s", ms->o.buf, buf);
free(buf);
if (len < 0)
goto out;
free(ms->o.buf);
buf = newstr;
}
ms->o.buf = buf;
return 0;
out:
file_error(ms, errno, "vasprintf failed");
return -1;
}
protected int
file_printf(struct magic_set *ms, const char *fmt, ...)
{
int rv;
va_list ap;
va_start(ap, fmt);
rv = file_vprintf(ms, fmt, ap);
va_end(ap);
return rv;
}
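/*
 * Minimal usage sketch (hypothetical call site, not part of this
 * file): file_printf() accumulates formatted text into ms->o.buf,
 * so successive calls append rather than overwrite:
 *
 *	file_printf(ms, "%s", "PNG image data");
 *	file_printf(ms, ", %d x %d", 640, 480);
 *	-- ms->o.buf now holds "PNG image data, 640 x 480"
 */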
/*
* error - print best error message possible
*/
/*VARARGS*/
__attribute__((__format__(__printf__, 3, 0)))
private void
file_error_core(struct magic_set *ms, int error, const char *f, va_list va,
size_t lineno)
{
/* Only the first error is ok */
if (ms->event_flags & EVENT_HAD_ERR)
return;
if (lineno != 0) {
free(ms->o.buf);
ms->o.buf = NULL;
file_printf(ms, "line %" SIZE_T_FORMAT "u: ", lineno);
}
file_vprintf(ms, f, va);
if (error > 0)
file_printf(ms, " (%s)", strerror(error));
ms->event_flags |= EVENT_HAD_ERR;
ms->error = error;
}
/*VARARGS*/
protected void
file_error(struct magic_set *ms, int error, const char *f, ...)
{
va_list va;
va_start(va, f);
file_error_core(ms, error, f, va, 0);
va_end(va);
}
/*
* Print an error with magic line number.
*/
/*VARARGS*/
protected void
file_magerror(struct magic_set *ms, const char *f, ...)
{
va_list va;
va_start(va, f);
file_error_core(ms, 0, f, va, ms->line);
va_end(va);
}
protected void
file_oomem(struct magic_set *ms, size_t len)
{
file_error(ms, errno, "cannot allocate %" SIZE_T_FORMAT "u bytes",
len);
}
protected void
file_badseek(struct magic_set *ms)
{
file_error(ms, errno, "error seeking");
}
protected void
file_badread(struct magic_set *ms)
{
file_error(ms, errno, "error reading");
}
#ifndef COMPILE_ONLY
protected int
file_buffer(struct magic_set *ms, int fd, const char *inname __attribute__ ((unused)),
const void *buf, size_t nb)
{
int m = 0, rv = 0, looks_text = 0;
int mime = ms->flags & MAGIC_MIME;
const unsigned char *ubuf = CAST(const unsigned char *, buf);
unichar *u8buf = NULL;
size_t ulen;
const char *code = NULL;
const char *code_mime = "binary";
const char *type = "application/octet-stream";
const char *def = "data";
const char *ftype = NULL;
if (nb == 0) {
def = "empty";
type = "application/x-empty";
goto simple;
} else if (nb == 1) {
def = "very short file (no magic)";
goto simple;
}
if ((ms->flags & MAGIC_NO_CHECK_ENCODING) == 0) {
looks_text = file_encoding(ms, ubuf, nb, &u8buf, &ulen,
&code, &code_mime, &ftype);
}
#ifdef __EMX__
if ((ms->flags & MAGIC_NO_CHECK_APPTYPE) == 0 && inname) {
switch (file_os2_apptype(ms, inname, buf, nb)) {
case -1:
return -1;
case 0:
break;
default:
return 1;
}
}
#endif
#if HAVE_FORK
/* try compression stuff */
if ((ms->flags & MAGIC_NO_CHECK_COMPRESS) == 0)
if ((m = file_zmagic(ms, fd, inname, ubuf, nb)) != 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr, "zmagic %d\n", m);
goto done_encoding;
}
#endif
/* Check if we have a tar file */
if ((ms->flags & MAGIC_NO_CHECK_TAR) == 0)
if ((m = file_is_tar(ms, ubuf, nb)) != 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr, "tar %d\n", m);
goto done;
}
/* Check if we have a CDF file */
if ((ms->flags & MAGIC_NO_CHECK_CDF) == 0)
if ((m = file_trycdf(ms, fd, ubuf, nb)) != 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr, "cdf %d\n", m);
goto done;
}
/* try soft magic tests */
if ((ms->flags & MAGIC_NO_CHECK_SOFT) == 0)
if ((m = file_softmagic(ms, ubuf, nb, 0, BINTEST,
looks_text)) != 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr, "softmagic %d\n", m);
#ifdef BUILTIN_ELF
if ((ms->flags & MAGIC_NO_CHECK_ELF) == 0 && m == 1 &&
nb > 5 && fd != -1) {
/*
* We matched something in the file, so this
* *might* be an ELF file, and the file is at
* least 5 bytes long, so if it's an ELF file
* it has at least one byte past the ELF magic
* number - try extracting information from the
* ELF headers that cannot easily be
* extracted with rules in the magic file.
*/
if ((m = file_tryelf(ms, fd, ubuf, nb)) != 0)
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr,
"elf %d\n", m);
}
#endif
goto done;
}
/* try text properties */
if ((ms->flags & MAGIC_NO_CHECK_TEXT) == 0) {
if ((m = file_ascmagic(ms, ubuf, nb, looks_text)) != 0) {
if ((ms->flags & MAGIC_DEBUG) != 0)
(void)fprintf(stderr, "ascmagic %d\n", m);
goto done;
}
}
simple:
/* give up */
m = 1;
if ((!mime || (mime & MAGIC_MIME_TYPE)) &&
file_printf(ms, "%s", mime ? type : def) == -1) {
rv = -1;
}
done:
if ((ms->flags & MAGIC_MIME_ENCODING) != 0) {
if (ms->flags & MAGIC_MIME_TYPE)
if (file_printf(ms, "; charset=") == -1)
rv = -1;
if (file_printf(ms, "%s", code_mime) == -1)
rv = -1;
}
#if HAVE_FORK
done_encoding:
#endif
free(u8buf);
if (rv)
return rv;
return m;
}
#endif
protected int
file_reset(struct magic_set *ms)
{
if (ms->mlist[0] == NULL) {
file_error(ms, 0, "no magic files loaded");
return -1;
}
if (ms->o.buf) {
free(ms->o.buf);
ms->o.buf = NULL;
}
if (ms->o.pbuf) {
free(ms->o.pbuf);
ms->o.pbuf = NULL;
}
ms->event_flags &= ~EVENT_HAD_ERR;
ms->error = -1;
return 0;
}
#define OCTALIFY(n, o) \
/*LINTED*/ \
(void)(*(n)++ = '\\', \
*(n)++ = (((uint32_t)*(o) >> 6) & 3) + '0', \
*(n)++ = (((uint32_t)*(o) >> 3) & 7) + '0', \
*(n)++ = (((uint32_t)*(o) >> 0) & 7) + '0', \
(o)++)
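/*
 * Worked example (illustrative only): OCTALIFY() expands one byte at
 * *o into a four-character octal escape at *n and advances both
 * pointers. For a newline byte 0x0a:
 *
 *	unsigned char in = 0x0a;
 *	const unsigned char *o = &in;
 *	char out[4], *n = out;
 *	OCTALIFY(n, o);		-- out holds '\\', '0', '1', '2'
 *
 * i.e. the text "\012"; this is why file_getbuffer() below sizes its
 * printable buffer at len * 4 + 1.
 */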
protected const char *
file_getbuffer(struct magic_set *ms)
{
char *pbuf, *op, *np;
size_t psize, len;
if (ms->event_flags & EVENT_HAD_ERR)
return NULL;
if (ms->flags & MAGIC_RAW)
return ms->o.buf;
if (ms->o.buf == NULL)
return NULL;
/* Each byte may expand to a 4-char octal escape; + 1 is for the NUL */
len = strlen(ms->o.buf);
if (len > (SIZE_MAX - 1) / 4) {
file_oomem(ms, len);
return NULL;
}
psize = len * 4 + 1;
if ((pbuf = CAST(char *, realloc(ms->o.pbuf, psize))) == NULL) {
file_oomem(ms, psize);
return NULL;
}
ms->o.pbuf = pbuf;
#if defined(HAVE_WCHAR_H) && defined(HAVE_MBRTOWC) && defined(HAVE_WCWIDTH)
{
mbstate_t state;
wchar_t nextchar;
int mb_conv = 1;
size_t bytesconsumed;
char *eop;
(void)memset(&state, 0, sizeof(mbstate_t));
np = ms->o.pbuf;
op = ms->o.buf;
eop = op + len;
while (op < eop) {
bytesconsumed = mbrtowc(&nextchar, op,
(size_t)(eop - op), &state);
if (bytesconsumed == (size_t)(-1) ||
bytesconsumed == (size_t)(-2)) {
mb_conv = 0;
break;
}
if (iswprint(nextchar)) {
(void)memcpy(np, op, bytesconsumed);
op += bytesconsumed;
np += bytesconsumed;
} else {
while (bytesconsumed-- > 0)
OCTALIFY(np, op);
}
}
*np = '\0';
/* Parsing succeeded as a multi-byte sequence */
if (mb_conv != 0)
return ms->o.pbuf;
}
#endif
for (np = ms->o.pbuf, op = ms->o.buf; *op;) {
if (isprint((unsigned char)*op)) {
*np++ = *op++;
} else {
OCTALIFY(np, op);
}
}
*np = '\0';
return ms->o.pbuf;
}
protected int
file_check_mem(struct magic_set *ms, unsigned int level)
{
size_t len;
if (level >= ms->c.len) {
len = (ms->c.len += 20) * sizeof(*ms->c.li);
ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ?
malloc(len) :
realloc(ms->c.li, len));
if (ms->c.li == NULL) {
file_oomem(ms, len);
return -1;
}
}
ms->c.li[level].got_match = 0;
#ifdef ENABLE_CONDITIONALS
ms->c.li[level].last_match = 0;
ms->c.li[level].last_cond = COND_NONE;
#endif /* ENABLE_CONDITIONALS */
return 0;
}
protected size_t
file_printedlen(const struct magic_set *ms)
{
return ms->o.buf == NULL ? 0 : strlen(ms->o.buf);
}
protected int
file_replace(struct magic_set *ms, const char *pat, const char *rep)
{
file_regex_t rx;
int rc, rv = -1;
rc = file_regcomp(&rx, pat, REG_EXTENDED);
if (rc) {
file_regerror(&rx, rc, ms);
} else {
regmatch_t rm;
int nm = 0;
while (file_regexec(&rx, ms->o.buf, 1, &rm, 0) == 0) {
ms->o.buf[rm.rm_so] = '\0';
if (file_printf(ms, "%s%s", rep,
rm.rm_eo != 0 ? ms->o.buf + rm.rm_eo : "") == -1)
goto out;
nm++;
}
rv = nm;
}
out:
file_regfree(&rx);
return rv;
}
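/*
 * Usage sketch (hypothetical pattern and replacement, for
 * illustration): file_replace() substitutes every match of an
 * extended regex in the accumulated output buffer and returns the
 * number of substitutions, or -1 on error:
 *
 *	int nm = file_replace(ms, "\t+", " ");
 *	-- runs of tabs in ms->o.buf collapsed to single spaces;
 *	-- on failure the error was already recorded via file_regerror()
 */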
protected int
file_regcomp(file_regex_t *rx, const char *pat, int flags)
{
#ifdef USE_C_LOCALE
rx->c_lc_ctype = newlocale(LC_CTYPE_MASK, "C", 0);
assert(rx->c_lc_ctype != NULL);
rx->old_lc_ctype = uselocale(rx->c_lc_ctype);
assert(rx->old_lc_ctype != NULL);
#endif
rx->pat = pat;
return rx->rc = regcomp(&rx->rx, pat, flags);
}
protected int
file_regexec(file_regex_t *rx, const char *str, size_t nmatch,
regmatch_t* pmatch, int eflags)
{
assert(rx->rc == 0);
return regexec(&rx->rx, str, nmatch, pmatch, eflags);
}
protected void
file_regfree(file_regex_t *rx)
{
if (rx->rc == 0)
regfree(&rx->rx);
#ifdef USE_C_LOCALE
(void)uselocale(rx->old_lc_ctype);
freelocale(rx->c_lc_ctype);
#endif
}
protected void
file_regerror(file_regex_t *rx, int rc, struct magic_set *ms)
{
char errmsg[512];
(void)regerror(rc, &rx->rx, errmsg, sizeof(errmsg));
file_magerror(ms, "regex error %d for `%s', (%s)", rc, rx->pat,
errmsg);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2297_1 |
crossvul-cpp_data_good_910_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS %
% P P SS %
% PPPP SSS %
% P SS %
% P SSSSS %
% %
% %
% Read/Write Postscript Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/delegate-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Forward declarations.
*/
static MagickBooleanType
WritePSImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v o k e P o s t s c r i p t D e l e g a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InvokePostscriptDelegate() executes the Postscript interpreter with the
% specified command.
%
% The format of the InvokePostscriptDelegate method is:
%
% MagickBooleanType InvokePostscriptDelegate(
% const MagickBooleanType verbose,const char *command,char *message,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o verbose: A value other than zero displays the command prior to
% executing it.
%
% o command: the address of a character string containing the command to
% execute.
%
% o message: receives any error message produced by the Ghostscript
% interpreter.
%
% o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
static int MagickDLLCall PostscriptDelegateMessage(void *handle,
const char *message,int length)
{
char
**messages;
ssize_t
offset;
offset=0;
messages=(char **) handle;
if (*messages == (char *) NULL)
*messages=(char *) AcquireQuantumMemory(length+1,sizeof(char *));
else
{
offset=strlen(*messages);
*messages=(char *) ResizeQuantumMemory(*messages,offset+length+1,
sizeof(char *));
}
if (*messages == (char *) NULL)
return(0);
(void) memcpy(*messages+offset,message,length);
(*messages)[length+offset] ='\0';
return(length);
}
#endif
static MagickBooleanType InvokePostscriptDelegate(
const MagickBooleanType verbose,const char *command,char *message,
ExceptionInfo *exception)
{
int
status;
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
#define SetArgsStart(command,args_start) \
if (args_start == (const char *) NULL) \
{ \
if (*command != '"') \
args_start=strchr(command,' '); \
else \
{ \
args_start=strchr(command+1,'"'); \
if (args_start != (const char *) NULL) \
args_start++; \
} \
}
#define ExecuteGhostscriptCommand(command,status) \
{ \
status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \
exception); \
if (status == 0) \
return(MagickTrue); \
if (status < 0) \
return(MagickFalse); \
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \
"FailedToExecuteCommand","`%s' (%d)",command,status); \
return(MagickFalse); \
}
char
**argv,
*errors;
const char
*args_start = (const char *) NULL;
const GhostInfo
*ghost_info;
gs_main_instance
*interpreter;
gsapi_revision_t
revision;
int
argc,
code;
register ssize_t
i;
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
ghost_info=NTGhostscriptDLLVectors();
#else
GhostInfo
ghost_info_struct;
ghost_info=(&ghost_info_struct);
(void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct));
ghost_info_struct.delete_instance=(void (*)(gs_main_instance *))
gsapi_delete_instance;
ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit;
ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *))
gsapi_new_instance;
ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **))
gsapi_init_with_args;
ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int,
int *)) gsapi_run_string;
ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *,
int),int (*)(void *,const char *,int),int (*)(void *, const char *, int)))
gsapi_set_stdio;
ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision;
#endif
if (ghost_info == (GhostInfo *) NULL)
ExecuteGhostscriptCommand(command,status);
if ((ghost_info->revision)(&revision,sizeof(revision)) != 0)
revision.revision=0;
if (verbose != MagickFalse)
{
(void) fprintf(stdout,"[ghostscript library %.2f]",(double)
revision.revision/100.0);
SetArgsStart(command,args_start);
(void) fputs(args_start,stdout);
}
interpreter=(gs_main_instance *) NULL;
errors=(char *) NULL;
status=(ghost_info->new_instance)(&interpreter,(void *) &errors);
if (status < 0)
ExecuteGhostscriptCommand(command,status);
code=0;
argv=StringToArgv(command,&argc);
if (argv == (char **) NULL)
{
(ghost_info->delete_instance)(interpreter);
return(MagickFalse);
}
(void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *,
char *,int)) NULL,PostscriptDelegateMessage,PostscriptDelegateMessage);
status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1);
if (status == 0)
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n",
0,&code);
(ghost_info->exit)(interpreter);
(ghost_info->delete_instance)(interpreter);
for (i=0; i < (ssize_t) argc; i++)
argv[i]=DestroyString(argv[i]);
argv=(char **) RelinquishMagickMemory(argv);
if (status != 0)
{
SetArgsStart(command,args_start);
if (status == -101) /* quit */
(void) FormatLocaleString(message,MaxTextExtent,
"[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0,
args_start,errors);
else
{
(void) ThrowMagickException(exception,GetMagickModule(),
DelegateError,"PostscriptDelegateFailed",
"`[ghostscript library %.2f]%s': %s",(double) revision.revision/
100.0,args_start,errors);
if (errors != (char *) NULL)
errors=DestroyString(errors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Ghostscript returns status %d, exit code %d",status,code);
return(MagickFalse);
}
}
if (errors != (char *) NULL)
errors=DestroyString(errors);
return(MagickTrue);
#else
status=ExternalDelegateCommand(MagickFalse,verbose,command,(char *) NULL,
exception);
return(status == 0 ? MagickTrue : MagickFalse);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPS() returns MagickTrue if the image format type, identified by the
% magick string, is PS.
%
% The format of the IsPS method is:
%
% MagickBooleanType IsPS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsPS(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (memcmp(magick,"%!",2) == 0)
return(MagickTrue);
if (memcmp(magick,"\004%!",3) == 0)
return(MagickTrue);
return(MagickFalse);
}
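/*
 * Illustrative checks (hypothetical buffers): IsPS() keys off the
 * "%!" PostScript signature, optionally preceded by the ^D (0x04)
 * byte some print spoolers emit:
 *
 *	IsPS((const unsigned char *) "%!PS-Adobe-3.0", 14);	-- MagickTrue
 *	IsPS((const unsigned char *) "\004%!PS", 5);		-- MagickTrue
 *	IsPS((const unsigned char *) "GIF89a", 6);		-- MagickFalse
 */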
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSImage() reads a Postscript image file and returns it. It allocates
% the memory necessary for the new Image structure and returns a pointer
% to the new image.
%
% The format of the ReadPSImage method is:
%
% Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType IsPostscriptRendered(const char *path)
{
MagickBooleanType
status;
struct stat
attributes;
if ((path == (const char *) NULL) || (*path == '\0'))
return(MagickFalse);
status=GetPathAttributes(path,&attributes);
if ((status != MagickFalse) && S_ISREG(attributes.st_mode) &&
(attributes.st_size > 0))
return(MagickTrue);
return(MagickFalse);
}
static inline int ProfileInteger(Image *image,short int *hex_digits)
{
int
c,
l,
value;
register ssize_t
i;
l=0;
value=0;
for (i=0; i < 2; )
{
c=ReadBlobByte(image);
if ((c == EOF) || ((c == '%') && (l == '%')))
{
value=(-1);
break;
}
l=c;
c&=0xff;
if (isxdigit(c) == MagickFalse)
continue;
value=(int) ((size_t) value << 4)+hex_digits[c];
i++;
}
return(value);
}
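/*
 * Worked example (illustrative): ProfileInteger() consumes blob bytes
 * until it has gathered two hex digits, silently skipping whitespace
 * and other non-hex characters, and returns their combined value; the
 * input "A1" therefore decodes to (10 << 4) + 1 = 161 (0xA1), while
 * a "%%" sequence or EOF terminates the profile and yields -1.
 */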
static Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define BoundingBox "BoundingBox:"
#define BeginDocument "BeginDocument:"
#define BeginXMPPacket "<?xpacket begin="
#define EndXMPPacket "<?xpacket end="
#define ICCProfile "BeginICCProfile:"
#define CMYKCustomColor "CMYKCustomColor:"
#define CMYKProcessColor "CMYKProcessColor:"
#define DocumentMedia "DocumentMedia:"
#define DocumentCustomColors "DocumentCustomColors:"
#define DocumentProcessColors "DocumentProcessColors:"
#define EndDocument "EndDocument:"
#define HiResBoundingBox "HiResBoundingBox:"
#define ImageData "ImageData:"
#define PageBoundingBox "PageBoundingBox:"
#define LanguageLevel "LanguageLevel:"
#define PageMedia "PageMedia:"
#define Pages "Pages:"
#define PhotoshopProfile "BeginPhotoshop:"
#define PostscriptLevel "!PS-"
#define RenderPostscriptText " Rendering Postscript... "
#define SpotColor "+ "
char
command[MaxTextExtent],
*density,
filename[MaxTextExtent],
geometry[MaxTextExtent],
input_filename[MaxTextExtent],
message[MaxTextExtent],
*options,
postscript_filename[MaxTextExtent];
const char
*option;
const DelegateInfo
*delegate_info;
GeometryInfo
geometry_info;
Image
*image,
*next,
*postscript_image;
ImageInfo
*read_info;
int
c,
file;
MagickBooleanType
cmyk,
fitPage,
skip,
status;
MagickStatusType
flags;
PointInfo
delta,
resolution;
RectangleInfo
page;
register char
*p;
register ssize_t
i;
SegmentInfo
bounds,
hires_bounds;
short int
hex_digits[256];
size_t
length,
priority;
ssize_t
count;
StringInfo
*profile;
unsigned long
columns,
extent,
language_level,
pages,
rows,
scene,
spotcolor;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize hex values.
*/
(void) memset(hex_digits,0,sizeof(hex_digits));
hex_digits[(int) '0']=0;
hex_digits[(int) '1']=1;
hex_digits[(int) '2']=2;
hex_digits[(int) '3']=3;
hex_digits[(int) '4']=4;
hex_digits[(int) '5']=5;
hex_digits[(int) '6']=6;
hex_digits[(int) '7']=7;
hex_digits[(int) '8']=8;
hex_digits[(int) '9']=9;
hex_digits[(int) 'a']=10;
hex_digits[(int) 'b']=11;
hex_digits[(int) 'c']=12;
hex_digits[(int) 'd']=13;
hex_digits[(int) 'e']=14;
hex_digits[(int) 'f']=15;
hex_digits[(int) 'A']=10;
hex_digits[(int) 'B']=11;
hex_digits[(int) 'C']=12;
hex_digits[(int) 'D']=13;
hex_digits[(int) 'E']=14;
hex_digits[(int) 'F']=15;
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
resolution.x=image->x_resolution;
resolution.y=image->y_resolution;
page.width=(size_t) ceil((double) (page.width*resolution.x/delta.x)-0.5);
page.height=(size_t) ceil((double) (page.height*resolution.y/delta.y)-0.5);
/*
Determine page geometry from the Postscript bounding box.
*/
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(command,0,sizeof(command));
cmyk=image_info->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
(void) memset(&hires_bounds,0,sizeof(hires_bounds));
priority=0;
columns=0;
rows=0;
extent=0;
spotcolor=0;
language_level=1;
skip=MagickFalse;
pages=(~0UL);
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note document structuring comments.
*/
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
/*
Skip %%BeginDocument thru %%EndDocument.
*/
if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0)
skip=MagickTrue;
if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0)
skip=MagickFalse;
if (skip != MagickFalse)
continue;
if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0)
{
(void) SetImageProperty(image,"ps:Level",command+4);
if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse)
pages=1;
}
if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0)
(void) sscanf(command,LanguageLevel " %lu",&language_level);
if (LocaleNCompare(Pages,command,strlen(Pages)) == 0)
(void) sscanf(command,Pages " %lu",&pages);
if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0)
(void) sscanf(command,ImageData " %lu %lu",&columns,&rows);
/*
Is this a CMYK document?
*/
length=strlen(DocumentProcessColors);
if (LocaleNCompare(DocumentProcessColors,command,length) == 0)
{
if ((GlobExpression(command,"*Cyan*",MagickTrue) != MagickFalse) ||
(GlobExpression(command,"*Magenta*",MagickTrue) != MagickFalse) ||
(GlobExpression(command,"*Yellow*",MagickTrue) != MagickFalse))
cmyk=MagickTrue;
}
if (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CMYKProcessColor,command,strlen(CMYKProcessColor)) == 0)
cmyk=MagickTrue;
length=strlen(DocumentCustomColors);
if ((LocaleNCompare(DocumentCustomColors,command,length) == 0) ||
(LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) ||
(LocaleNCompare(SpotColor,command,strlen(SpotColor)) == 0))
{
char
property[MaxTextExtent],
*value;
register char
*p;
/*
Note spot names.
*/
(void) FormatLocaleString(property,MaxTextExtent,"ps:SpotColor-%.20g",
(double) (spotcolor++));
for (p=command; *p != '\0'; p++)
if (isspace((int) (unsigned char) *p) != 0)
break;
value=ConstantString(p);
(void) SubstituteString(&value,"(","");
(void) SubstituteString(&value,")","");
(void) StripString(value);
if (*value != '\0')
(void) SetImageProperty(image,property,value);
value=DestroyString(value);
continue;
}
if (image_info->page != (char *) NULL)
continue;
/*
Note region defined by bounding box.
*/
count=0;
i=0;
if (LocaleNCompare(BoundingBox,command,strlen(BoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,BoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=2;
}
if (LocaleNCompare(DocumentMedia,command,strlen(DocumentMedia)) == 0)
{
count=(ssize_t) sscanf(command,DocumentMedia " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if (LocaleNCompare(HiResBoundingBox,command,strlen(HiResBoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,HiResBoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=3;
}
if (LocaleNCompare(PageBoundingBox,command,strlen(PageBoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,PageBoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if (LocaleNCompare(PageMedia,command,strlen(PageMedia)) == 0)
{
count=(ssize_t) sscanf(command,PageMedia " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if ((count != 4) || (i < (ssize_t) priority))
continue;
if ((fabs(bounds.x2-bounds.x1) <= fabs(hires_bounds.x2-hires_bounds.x1)) ||
(fabs(bounds.y2-bounds.y1) <= fabs(hires_bounds.y2-hires_bounds.y1)))
if (i == (ssize_t) priority)
continue;
hires_bounds=bounds;
priority=(size_t) i;
}
if ((fabs(hires_bounds.x2-hires_bounds.x1) >= MagickEpsilon) &&
(fabs(hires_bounds.y2-hires_bounds.y1) >= MagickEpsilon))
{
/*
Set Postscript render geometry.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g%+.15g%+.15g",
hires_bounds.x2-hires_bounds.x1,hires_bounds.y2-hires_bounds.y1,
hires_bounds.x1,hires_bounds.y1);
(void) SetImageProperty(image,"ps:HiResBoundingBox",geometry);
page.width=(size_t) ceil((double) ((hires_bounds.x2-hires_bounds.x1)*
resolution.x/delta.x)-0.5);
page.height=(size_t) ceil((double) ((hires_bounds.y2-hires_bounds.y1)*
resolution.y/delta.y)-0.5);
}
fitPage=MagickFalse;
option=GetImageOption(image_info,"eps:fit-page");
if (option != (char *) NULL)
{
char
*geometry;
MagickStatusType
flags;
geometry=GetPageGeometry(option);
flags=ParseMetaGeometry(geometry,&page.x,&page.y,&page.width,
&page.height);
if (flags == NoValue)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidGeometry","`%s'",option);
geometry=DestroyString(geometry);
image=DestroyImage(image);
return((Image *) NULL);
}
page.width=(size_t) ceil((double) (page.width*image->x_resolution/delta.x)
-0.5);
page.height=(size_t) ceil((double) (page.height*image->y_resolution/
delta.y) -0.5);
geometry=DestroyString(geometry);
fitPage=MagickTrue;
}
if (IssRGBCompatibleColorspace(image_info->colorspace) != MagickFalse)
cmyk=MagickFalse;
/*
Create Ghostscript control file.
*/
file=AcquireUniqueFileResource(postscript_filename);
if (file == -1)
{
ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(command,"/setpagedevice {pop} bind 1 index where {"
"dup wcheck {3 1 roll put} {pop def} ifelse} {def} ifelse\n"
"<</UseCIEColor true>>setpagedevice\n",MaxTextExtent);
count=write(file,command,(unsigned int) strlen(command));
if (image_info->page == (char *) NULL)
{
char
translate_geometry[MaxTextExtent];
(void) FormatLocaleString(translate_geometry,MaxTextExtent,
"%g %g translate\n",-hires_bounds.x1,-hires_bounds.y1);
count=write(file,translate_geometry,(unsigned int)
strlen(translate_geometry));
}
file=close(file)-1;
/*
Render Postscript with the Ghostscript delegate.
*/
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("ps:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("ps:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("ps:alpha",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
(void) RelinquishUniqueFileResource(postscript_filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MaxTextExtent,"%gx%g",resolution.x,
resolution.y);
(void) FormatLocaleString(options,MaxTextExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
char
pages[MaxTextExtent];
(void) FormatLocaleString(pages,MaxTextExtent,"-dFirstPage=%.20g "
"-dLastPage=%.20g ",(double) read_info->scene+1,(double)
(read_info->scene+read_info->number_scenes));
(void) ConcatenateMagickString(options,pages,MaxTextExtent);
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
if (*image_info->magick == 'E')
{
option=GetImageOption(image_info,"eps:use-cropbox");
if ((option == (const char *) NULL) ||
(IsStringTrue(option) != MagickFalse))
(void) ConcatenateMagickString(options,"-dEPSCrop ",MaxTextExtent);
if (fitPage != MagickFalse)
(void) ConcatenateMagickString(options,"-dEPSFitPage ",MaxTextExtent);
}
(void) CopyMagickString(filename,read_info->filename,MaxTextExtent);
(void) AcquireUniqueFilename(filename);
(void) RelinquishUniqueFileResource(filename);
(void) ConcatenateMagickString(filename,"%d",MaxTextExtent);
(void) FormatLocaleString(command,MaxTextExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,filename,
postscript_filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
*message='\0';
status=InvokePostscriptDelegate(read_info->verbose,command,message,exception);
(void) InterpretImageFilename(image_info,image,filename,1,
read_info->filename);
if ((status == MagickFalse) ||
(IsPostscriptRendered(read_info->filename) == MagickFalse))
{
(void) ConcatenateMagickString(command," -c showpage",MaxTextExtent);
status=InvokePostscriptDelegate(read_info->verbose,command,message,
exception);
}
(void) RelinquishUniqueFileResource(postscript_filename);
(void) RelinquishUniqueFileResource(input_filename);
postscript_image=(Image *) NULL;
if (status == MagickFalse)
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename);
if (IsPostscriptRendered(read_info->filename) == MagickFalse)
break;
(void) RelinquishUniqueFileResource(read_info->filename);
}
else
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename);
if (IsPostscriptRendered(read_info->filename) == MagickFalse)
break;
read_info->blob=NULL;
read_info->length=0;
next=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
if (next == (Image *) NULL)
break;
AppendImageToList(&postscript_image,next);
}
(void) RelinquishUniqueFileResource(read_info->filename);
read_info=DestroyImageInfo(read_info);
if (postscript_image == (Image *) NULL)
{
if (*message != '\0')
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError,
"PostscriptDelegateFailed","`%s'",message);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (LocaleCompare(postscript_image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(postscript_image,exception);
if (cmyk_image != (Image *) NULL)
{
postscript_image=DestroyImageList(postscript_image);
postscript_image=cmyk_image;
}
}
(void) SeekBlob(image,0,SEEK_SET);
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note document structuring comments.
*/
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
/*
Skip %%BeginDocument thru %%EndDocument.
*/
if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0)
skip=MagickTrue;
if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0)
skip=MagickFalse;
if (skip != MagickFalse)
continue;
if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0)
{
(void) SetImageProperty(image,"ps:Level",command+4);
if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse)
pages=1;
}
if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0)
(void) sscanf(command,LanguageLevel " %lu",&language_level);
if (LocaleNCompare(Pages,command,strlen(Pages)) == 0)
(void) sscanf(command,Pages " %lu",&pages);
if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0)
(void) sscanf(command,ImageData " %lu %lu",&columns,&rows);
if (LocaleNCompare(ICCProfile,command,strlen(ICCProfile)) == 0)
{
unsigned char
*datum;
/*
Read ICC profile.
*/
profile=AcquireStringInfo(MaxTextExtent);
datum=GetStringInfoDatum(profile);
for (i=0; (c=ProfileInteger(image,hex_digits)) != EOF; i++)
{
if (i >= (ssize_t) GetStringInfoLength(profile))
{
SetStringInfoLength(profile,(size_t) i << 1);
datum=GetStringInfoDatum(profile);
}
datum[i]=(unsigned char) c;
}
SetStringInfoLength(profile,(size_t) i+1);
(void) SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
continue;
}
if (LocaleNCompare(PhotoshopProfile,command,strlen(PhotoshopProfile)) == 0)
{
unsigned char
*p;
/*
Read Photoshop profile.
*/
count=(ssize_t) sscanf(command,PhotoshopProfile " %lu",&extent);
if (count != 1)
continue;
length=extent;
if ((MagickSizeType) length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
profile=BlobToStringInfo((const void *) NULL,length);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
*p++=(unsigned char) ProfileInteger(image,hex_digits);
(void) SetImageProfile(image,"8bim",profile);
profile=DestroyStringInfo(profile);
}
continue;
}
if (LocaleNCompare(BeginXMPPacket,command,strlen(BeginXMPPacket)) == 0)
{
register size_t
i;
/*
Read XMP profile.
*/
p=command;
profile=StringToStringInfo(command);
for (i=GetStringInfoLength(profile)-1; c != EOF; i++)
{
SetStringInfoLength(profile,(size_t) (i+1));
c=ReadBlobByte(image);
GetStringInfoDatum(profile)[i]=(unsigned char) c;
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
if (LocaleNCompare(EndXMPPacket,command,strlen(EndXMPPacket)) == 0)
break;
}
SetStringInfoLength(profile,(size_t) i);
(void) SetImageProfile(image,"xmp",profile);
profile=DestroyStringInfo(profile);
continue;
}
}
(void) CloseBlob(image);
if (image_info->number_scenes != 0)
{
Image
*clone_image;
register ssize_t
i;
/*
Add placeholder images to meet the subimage specification requirement.
*/
for (i=0; i < (ssize_t) image_info->scene; i++)
{
clone_image=CloneImage(postscript_image,1,1,MagickTrue,exception);
if (clone_image != (Image *) NULL)
PrependImageToList(&postscript_image,clone_image);
}
}
do
{
(void) CopyMagickString(postscript_image->filename,filename,MaxTextExtent);
(void) CopyMagickString(postscript_image->magick,image->magick,
MaxTextExtent);
if (columns != 0)
postscript_image->magick_columns=columns;
if (rows != 0)
postscript_image->magick_rows=rows;
postscript_image->page=page;
(void) CloneImageProfiles(postscript_image,image);
(void) CloneImageProperties(postscript_image,image);
next=SyncNextImageInList(postscript_image);
if (next != (Image *) NULL)
postscript_image=next;
} while (next != (Image *) NULL);
image=DestroyImageList(image);
scene=0;
for (next=GetFirstImageInList(postscript_image); next != (Image *) NULL; )
{
next->scene=scene++;
next=GetNextImageInList(next);
}
return(GetFirstImageInList(postscript_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSImage() adds properties for the PS image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSImage method is:
%
% size_t RegisterPSImage(void)
%
*/
ModuleExport size_t RegisterPSImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("EPI");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->seekable_stream=MagickTrue;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->description=ConstantString(
"Encapsulated PostScript Interchange format");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPS");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->description=ConstantString("Encapsulated PostScript");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPSF");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->description=ConstantString("Encapsulated PostScript");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPSI");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->description=ConstantString(
"Encapsulated PostScript Interchange format");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PS");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
entry->blob_support=MagickFalse;
entry->description=ConstantString("PostScript");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSImage() removes format registrations made by the
% PS module from the list of supported formats.
%
% The format of the UnregisterPSImage method is:
%
% UnregisterPSImage(void)
%
*/
ModuleExport void UnregisterPSImage(void)
{
(void) UnregisterMagickInfo("EPI");
(void) UnregisterMagickInfo("EPS");
(void) UnregisterMagickInfo("EPSF");
(void) UnregisterMagickInfo("EPSI");
(void) UnregisterMagickInfo("PS");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSImage translates an image to encapsulated Postscript
% Level I for printing. If the supplied geometry is null, the image is
% centered on the Postscript page. Otherwise, the image is positioned as
% specified by the geometry.
%
% The format of the WritePSImage method is:
%
% MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
static inline unsigned char *PopHexPixel(const char hex_digits[][3],
const size_t pixel,unsigned char *pixels)
{
register const char
*hex;
hex=hex_digits[pixel];
*pixels++=(unsigned char) (*hex++);
*pixels++=(unsigned char) (*hex);
return(pixels);
}
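/*
 * Usage sketch (hypothetical buffer): PopHexPixel() appends the
 * two-character hex encoding of one byte value and returns the
 * advanced write pointer:
 *
 *	unsigned char buffer[8], *q = buffer;
 *	q=PopHexPixel(hex_digits,0xC8,q);	-- buffer begins "C8"
 *	q=PopHexPixel(hex_digits,0x0F,q);	-- buffer begins "C80F"
 */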
static MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image)
{
#define WriteRunlengthPacket(image,pixel,length,p) \
{ \
if ((image->matte != MagickFalse) && (length != 0) &&\
(GetPixelOpacity(p) == (Quantum) TransparentOpacity)) \
{ \
q=PopHexPixel(hex_digits,0xff,q); \
q=PopHexPixel(hex_digits,0xff,q); \
q=PopHexPixel(hex_digits,0xff,q); \
} \
else \
{ \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.red),q); \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.green),q); \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.blue),q); \
} \
q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); \
}
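/*
 * Encoding sketch (illustrative values): WriteRunlengthPacket() emits
 * one RGB triple plus a run length, all as hex pairs; an opaque red
 * pixel repeated five times (the length field counts pixels minus
 * one, capped at 0xff) is therefore written as the eight characters
 * "FF000004".
 */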
static const char
hex_digits[][3] =
{
"00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0A", "0B",
"0C", "0D", "0E", "0F", "10", "11", "12", "13", "14", "15", "16", "17",
"18", "19", "1A", "1B", "1C", "1D", "1E", "1F", "20", "21", "22", "23",
"24", "25", "26", "27", "28", "29", "2A", "2B", "2C", "2D", "2E", "2F",
"30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3A", "3B",
"3C", "3D", "3E", "3F", "40", "41", "42", "43", "44", "45", "46", "47",
"48", "49", "4A", "4B", "4C", "4D", "4E", "4F", "50", "51", "52", "53",
"54", "55", "56", "57", "58", "59", "5A", "5B", "5C", "5D", "5E", "5F",
"60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6A", "6B",
"6C", "6D", "6E", "6F", "70", "71", "72", "73", "74", "75", "76", "77",
"78", "79", "7A", "7B", "7C", "7D", "7E", "7F", "80", "81", "82", "83",
"84", "85", "86", "87", "88", "89", "8A", "8B", "8C", "8D", "8E", "8F",
"90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9A", "9B",
"9C", "9D", "9E", "9F", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7",
"A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF", "B0", "B1", "B2", "B3",
"B4", "B5", "B6", "B7", "B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF",
"C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "CA", "CB",
"CC", "CD", "CE", "CF", "D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7",
"D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF", "E0", "E1", "E2", "E3",
"E4", "E5", "E6", "E7", "E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF",
"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "FA", "FB",
"FC", "FD", "FE", "FF"
},
PostscriptProlog[] =
"%%BeginProlog\n"
"%\n"
"% Display a color image. The image is displayed in color on\n"
"% Postscript viewers or printers that support color, otherwise\n"
"% it is displayed as grayscale.\n"
"%\n"
"/DirectClassPacket\n"
"{\n"
" %\n"
" % Get a DirectClass packet.\n"
" %\n"
" % Parameters:\n"
" % red.\n"
" % green.\n"
" % blue.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile color_packet readhexstring pop pop\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 3 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add 3 mul def\n"
" } ifelse\n"
" 0 3 number_pixels 1 sub\n"
" {\n"
" pixels exch color_packet putinterval\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/DirectClassImage\n"
"{\n"
" %\n"
" % Display a DirectClass image.\n"
" %\n"
" systemdict /colorimage known\n"
" {\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { DirectClassPacket } false 3 colorimage\n"
" }\n"
" {\n"
" %\n"
" % No colorimage operator; convert to grayscale.\n"
" %\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { GrayDirectClassPacket } image\n"
" } ifelse\n"
"} bind def\n"
"\n"
"/GrayDirectClassPacket\n"
"{\n"
" %\n"
" % Get a DirectClass packet; convert to grayscale.\n"
" %\n"
" % Parameters:\n"
" % red\n"
" % green\n"
" % blue\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile color_packet readhexstring pop pop\n"
" color_packet 0 get 0.299 mul\n"
" color_packet 1 get 0.587 mul add\n"
" color_packet 2 get 0.114 mul add\n"
" cvi\n"
" /gray_packet exch def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 1 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add def\n"
" } ifelse\n"
" 0 1 number_pixels 1 sub\n"
" {\n"
" pixels exch gray_packet put\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/GrayPseudoClassPacket\n"
"{\n"
" %\n"
" % Get a PseudoClass packet; convert to grayscale.\n"
" %\n"
" % Parameters:\n"
" % index: index into the colormap.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile byte readhexstring pop 0 get\n"
" /offset exch 3 mul def\n"
" /color_packet colormap offset 3 getinterval def\n"
" color_packet 0 get 0.299 mul\n"
" color_packet 1 get 0.587 mul add\n"
" color_packet 2 get 0.114 mul add\n"
" cvi\n"
" /gray_packet exch def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 1 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add def\n"
" } ifelse\n"
" 0 1 number_pixels 1 sub\n"
" {\n"
" pixels exch gray_packet put\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/PseudoClassPacket\n"
"{\n"
" %\n"
" % Get a PseudoClass packet.\n"
" %\n"
" % Parameters:\n"
" % index: index into the colormap.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile byte readhexstring pop 0 get\n"
" /offset exch 3 mul def\n"
" /color_packet colormap offset 3 getinterval def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 3 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add 3 mul def\n"
" } ifelse\n"
" 0 3 number_pixels 1 sub\n"
" {\n"
" pixels exch color_packet putinterval\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/PseudoClassImage\n"
"{\n"
" %\n"
" % Display a PseudoClass image.\n"
" %\n"
" % Parameters:\n"
" % class: 0-PseudoClass or 1-Grayscale.\n"
" %\n"
" currentfile buffer readline pop\n"
" token pop /class exch def pop\n"
" class 0 gt\n"
" {\n"
" currentfile buffer readline pop\n"
" token pop /depth exch def pop\n"
" /grays columns 8 add depth sub depth mul 8 idiv string def\n"
" columns rows depth\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { currentfile grays readhexstring pop } image\n"
" }\n"
" {\n"
" %\n"
" % Parameters:\n"
" % colors: number of colors in the colormap.\n"
" % colormap: red, green, blue color packets.\n"
" %\n"
" currentfile buffer readline pop\n"
" token pop /colors exch def pop\n"
" /colors colors 3 mul def\n"
" /colormap colors string def\n"
" currentfile colormap readhexstring pop pop\n"
" systemdict /colorimage known\n"
" {\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { PseudoClassPacket } false 3 colorimage\n"
" }\n"
" {\n"
" %\n"
" % No colorimage operator; convert to grayscale.\n"
" %\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { GrayPseudoClassPacket } image\n"
" } ifelse\n"
" } ifelse\n"
"} bind def\n"
"\n"
"/DisplayImage\n"
"{\n"
" %\n"
" % Display a DirectClass or PseudoClass image.\n"
" %\n"
" % Parameters:\n"
" % x & y translation.\n"
" % x & y scale.\n"
" % label pointsize.\n"
" % image label.\n"
" % image columns & rows.\n"
" % class: 0-DirectClass or 1-PseudoClass.\n"
" % compression: 0-none or 1-RunlengthEncoded.\n"
" % hex color packets.\n"
" %\n"
" gsave\n"
" /buffer 512 string def\n"
" /byte 1 string def\n"
" /color_packet 3 string def\n"
" /pixels 768 string def\n"
"\n"
" currentfile buffer readline pop\n"
" token pop /x exch def\n"
" token pop /y exch def pop\n"
" x y translate\n"
" currentfile buffer readline pop\n"
" token pop /x exch def\n"
" token pop /y exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /pointsize exch def pop\n",
PostscriptEpilog[] =
" x y scale\n"
" currentfile buffer readline pop\n"
" token pop /columns exch def\n"
" token pop /rows exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /class exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /compression exch def pop\n"
" class 0 gt { PseudoClassImage } { DirectClassImage } ifelse\n"
" grestore\n";
char
buffer[MaxTextExtent],
date[MaxTextExtent],
**labels,
page_geometry[MaxTextExtent];
CompressionType
compression;
const char
*value;
const StringInfo
*profile;
double
pointsize;
GeometryInfo
geometry_info;
IndexPacket
index;
MagickBooleanType
status;
MagickOffsetType
scene;
MagickStatusType
flags;
PixelPacket
pixel;
PointInfo
delta,
resolution,
scale;
RectangleInfo
geometry,
media_info,
page_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
register unsigned char
*q;
SegmentInfo
bounds;
size_t
bit,
byte,
imageListLength,
length,
page,
text_size;
ssize_t
j,
y;
time_t
timer;
unsigned char
pixels[2048];
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) memset(&bounds,0,sizeof(bounds));
compression=image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
page=1;
scene=0;
imageListLength=GetImageListLength(image);
do
{
/*
Scale relative to dots-per-inch.
*/
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,sRGBColorspace);
delta.x=DefaultResolution;
delta.y=DefaultResolution;
resolution.x=image->x_resolution;
resolution.y=image->y_resolution;
if ((resolution.x == 0.0) || (resolution.y == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image->units == PixelsPerCentimeterResolution)
{
resolution.x=(double) ((size_t) (100.0*2.54*resolution.x+0.5)/100.0);
resolution.y=(double) ((size_t) (100.0*2.54*resolution.y+0.5)/100.0);
}
SetGeometry(image,&geometry);
(void) FormatLocaleString(page_geometry,MaxTextExtent,"%.20gx%.20g",
(double) image->columns,(double) image->rows);
if (image_info->page != (char *) NULL)
(void) CopyMagickString(page_geometry,image_info->page,MaxTextExtent);
else
if ((image->page.width != 0) && (image->page.height != 0))
(void) FormatLocaleString(page_geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double)
image->page.height,(double) image->page.x,(double) image->page.y);
else
if ((image->gravity != UndefinedGravity) &&
(LocaleCompare(image_info->magick,"PS") == 0))
(void) CopyMagickString(page_geometry,PSPageGeometry,MaxTextExtent);
(void) ConcatenateMagickString(page_geometry,">",MaxTextExtent);
(void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
scale.x=PerceptibleReciprocal(resolution.x)*geometry.width*delta.x;
geometry.width=(size_t) floor(scale.x+0.5);
scale.y=PerceptibleReciprocal(resolution.y)*geometry.height*delta.y;
geometry.height=(size_t) floor(scale.y+0.5);
(void) ParseAbsoluteGeometry(page_geometry,&media_info);
(void) ParseGravityGeometry(image,page_geometry,&page_info,
&image->exception);
if (image->gravity != UndefinedGravity)
{
geometry.x=(-page_info.x);
geometry.y=(ssize_t) (media_info.height+page_info.y-image->rows);
}
pointsize=12.0;
if (image_info->pointsize != 0.0)
pointsize=image_info->pointsize;
text_size=0;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
text_size=(size_t) (MultilineCensus(value)*pointsize+12);
if (page == 1)
{
/*
Output Postscript header.
*/
if (LocaleCompare(image_info->magick,"PS") == 0)
(void) CopyMagickString(buffer,"%!PS-Adobe-3.0\n",MaxTextExtent);
else
(void) CopyMagickString(buffer,"%!PS-Adobe-3.0 EPSF-3.0\n",
MaxTextExtent);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"%%Creator: (ImageMagick)\n");
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Title: (%s)\n",
image->filename);
(void) WriteBlobString(image,buffer);
timer=GetMagickTime();
(void) FormatMagickTime(timer,MaxTextExtent,date);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%CreationDate: (%s)\n",date);
(void) WriteBlobString(image,buffer);
bounds.x1=(double) geometry.x;
bounds.y1=(double) geometry.y;
bounds.x2=(double) geometry.x+scale.x;
bounds.y2=(double) geometry.y+(geometry.height+text_size);
if ((image_info->adjoin != MagickFalse) &&
(GetNextImageInList(image) != (Image *) NULL))
(void) CopyMagickString(buffer,"%%%%BoundingBox: (atend)\n",
MaxTextExtent);
else
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5),
ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,
bounds.x2,bounds.y2);
}
(void) WriteBlobString(image,buffer);
profile=GetImageProfile(image,"8bim");
if (profile != (StringInfo *) NULL)
{
/*
Embed Photoshop profile.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%BeginPhotoshop: %.20g",(double) GetStringInfoLength(profile));
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
{
if ((i % 32) == 0)
(void) WriteBlobString(image,"\n% ");
(void) FormatLocaleString(buffer,MaxTextExtent,"%02X",
(unsigned int) (GetStringInfoDatum(profile)[i] & 0xff));
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"\n%EndPhotoshop\n");
}
profile=GetImageProfile(image,"xmp");
DisableMSCWarning(4127)
if (0 && (profile != (StringInfo *) NULL))
RestoreMSCWarning
{
/*
Embed XML profile.
*/
(void) WriteBlobString(image,"\n%begin_xml_code\n");
(void) FormatLocaleString(buffer,MaxTextExtent,
"\n%%begin_xml_packet: %.20g\n",(double)
GetStringInfoLength(profile));
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
(void) WriteBlobByte(image,GetStringInfoDatum(profile)[i]);
(void) WriteBlobString(image,"\n%end_xml_packet\n%end_xml_code\n");
}
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
(void) WriteBlobString(image,
"%%DocumentNeededResources: font Times-Roman\n");
(void) WriteBlobString(image,"%%DocumentData: Clean7Bit\n");
(void) WriteBlobString(image,"%%LanguageLevel: 1\n");
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"%%Pages: 1\n");
else
{
/*
Compute the number of pages.
*/
(void) WriteBlobString(image,"%%Orientation: Portrait\n");
(void) WriteBlobString(image,"%%PageOrder: Ascend\n");
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Pages: %.20g\n",
image_info->adjoin != MagickFalse ? (double)
GetImageListLength(image) : 1.0);
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"%%EndComments\n");
(void) WriteBlobString(image,"\n%%BeginDefaults\n");
(void) WriteBlobString(image,"%%EndDefaults\n\n");
if ((LocaleCompare(image_info->magick,"EPI") == 0) ||
(LocaleCompare(image_info->magick,"EPSI") == 0) ||
(LocaleCompare(image_info->magick,"EPT") == 0))
{
Image
*preview_image;
Quantum
pixel;
register ssize_t
x;
ssize_t
y;
/*
Create preview image.
*/
preview_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (preview_image == (Image *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
Dump image as bitmap.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BeginPreview: %.20g %.20g %.20g %.20g\n%% ",(double)
preview_image->columns,(double) preview_image->rows,1.0,
(double) ((((preview_image->columns+7) >> 3)*preview_image->rows+
35)/36));
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(preview_image,0,y,preview_image->columns,1,
&preview_image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(preview_image);
bit=0;
byte=0;
for (x=0; x < (ssize_t) preview_image->columns; x++)
{
byte<<=1;
pixel=ClampToQuantum(GetPixelLuma(image,p));
if (pixel >= (Quantum) (QuantumRange/2))
byte|=0x01;
bit++;
if (bit == 8)
{
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
(void) WriteBlobString(image,"% ");
}
bit=0;
byte=0;
}
}
if (bit != 0)
{
byte<<=(8-bit);
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
(void) WriteBlobString(image,"% ");
}
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
(void) WriteBlobString(image,"\n%%EndPreview\n");
preview_image=DestroyImage(preview_image);
}
/*
Output Postscript commands.
*/
(void) WriteBlob(image,sizeof(PostscriptProlog)-1,
(const unsigned char *) PostscriptProlog);
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
{
(void) WriteBlobString(image,
" /Times-Roman findfont pointsize scalefont setfont\n");
for (j=(ssize_t) MultilineCensus(value)-1; j >= 0; j--)
{
(void) WriteBlobString(image," /label 512 string def\n");
(void) WriteBlobString(image,
" currentfile label readline pop\n");
(void) FormatLocaleString(buffer,MaxTextExtent,
" 0 y %g add moveto label show pop\n",j*pointsize+12);
(void) WriteBlobString(image,buffer);
}
}
(void) WriteBlob(image,sizeof(PostscriptEpilog)-1,
(const unsigned char *) PostscriptEpilog);
if (LocaleCompare(image_info->magick,"PS") == 0)
(void) WriteBlobString(image," showpage\n");
(void) WriteBlobString(image,"} bind def\n");
(void) WriteBlobString(image,"%%EndProlog\n");
}
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Page: 1 %.20g\n",
(double) (page++));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%PageBoundingBox: %.20g %.20g %.20g %.20g\n",(double) geometry.x,
(double) geometry.y,geometry.x+(double) geometry.width,geometry.y+(double)
(geometry.height+text_size));
(void) WriteBlobString(image,buffer);
if ((double) geometry.x < bounds.x1)
bounds.x1=(double) geometry.x;
if ((double) geometry.y < bounds.y1)
bounds.y1=(double) geometry.y;
if ((double) (geometry.x+geometry.width-1) > bounds.x2)
bounds.x2=(double) geometry.x+geometry.width-1;
if ((double) (geometry.y+(geometry.height+text_size)-1) > bounds.y2)
bounds.y2=(double) geometry.y+(geometry.height+text_size)-1;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
(void) WriteBlobString(image,"%%%%PageResources: font Times-Roman\n");
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"userdict begin\n");
(void) WriteBlobString(image,"DisplayImage\n");
/*
Output image data.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n%g %g\n%g\n",
(double) geometry.x,(double) geometry.y,scale.x,scale.y,pointsize);
(void) WriteBlobString(image,buffer);
labels=(char **) NULL;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
labels=StringToList(value);
if (labels != (char **) NULL)
{
for (i=0; labels[i] != (char *) NULL; i++)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%s \n",
labels[i]);
(void) WriteBlobString(image,buffer);
labels[i]=DestroyString(labels[i]);
}
labels=(char **) RelinquishMagickMemory(labels);
}
(void) memset(&pixel,0,sizeof(pixel));
pixel.opacity=(Quantum) TransparentOpacity;
index=(IndexPacket) 0;
x=0;
if ((image_info->type != TrueColorType) &&
(SetImageGray(image,&image->exception) != MagickFalse))
{
if (SetImageMonochrome(image,&image->exception) == MagickFalse)
{
Quantum
pixel;
/*
Dump image as grayscale.
*/
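/*
The header written below is "<columns> <rows>\n1\n1\n1\n8\n" (one sample
per pixel, 8 bits); pixel data follows as hex-encoded luma bytes, flushed
whenever the text line nears 80 columns.
*/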
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n1\n1\n1\n8\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(Quantum) ScaleQuantumToChar(ClampToQuantum(
GetPixelLuma(image,p)));
q=PopHexPixel(hex_digits,(size_t) pixel,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
}
else
{
ssize_t
y;
Quantum
pixel;
/*
Dump image as bitmap.
*/
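/*
Pixels are packed most-significant-bit first into bytes (byte<<=1, then
byte|=0x01 for bright pixels); a partial trailing byte is left-shifted to
pad out the row before it is hex-encoded.
*/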
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n1\n1\n1\n1\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
pixel=ClampToQuantum(GetPixelLuma(image,p));
if (pixel >= (Quantum) (QuantumRange/2))
byte|=0x01;
bit++;
if (bit == 8)
{
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+2) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+2) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
}
}
else
if ((image->storage_class == DirectClass) ||
(image->colors > 256) || (image->matte != MagickFalse))
{
/*
Dump DirectClass image.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n0\n%d\n",
(double) image->columns,(double) image->rows,
compression == RLECompression ? 1 : 0);
(void) WriteBlobString(image,buffer);
switch (compression)
{
case RLECompression:
{
/*
Dump runlength-encoded DirectColor packets.
*/
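/*
Each packet below is <RRGGBB><count> in hex; runs are capped at 255
repeats (MagickMin(length,0xff)) and the buffer is flushed once a text
line nears 80 columns.
*/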
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
pixel=(*p);
length=255;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((GetPixelRed(p) == pixel.red) &&
(GetPixelGreen(p) == pixel.green) &&
(GetPixelBlue(p) == pixel.blue) &&
(GetPixelOpacity(p) == pixel.opacity) &&
(length < 255) && (x < (ssize_t) (image->columns-1)))
length++;
else
{
if (x > 0)
{
WriteRunlengthPacket(image,pixel,length,p);
if ((q-pixels+10) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
length=0;
}
pixel=(*p);
p++;
}
WriteRunlengthPacket(image,pixel,length,p);
if ((q-pixels+10) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case NoCompression:
default:
{
/*
Dump uncompressed DirectColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) == (Quantum) TransparentOpacity))
{
q=PopHexPixel(hex_digits,0xff,q);
q=PopHexPixel(hex_digits,0xff,q);
q=PopHexPixel(hex_digits,0xff,q);
}
else
{
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelRed(p)),q);
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelGreen(p)),q);
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelBlue(p)),q);
}
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
}
(void) WriteBlobByte(image,'\n');
}
else
{
/*
Dump PseudoClass image.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n%d\n%d\n0\n",(double) image->columns,(double)
image->rows,image->storage_class == PseudoClass ? 1 : 0,
compression == RLECompression ? 1 : 0);
(void) WriteBlobString(image,buffer);
/*
Dump number of colors and colormap.
*/
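/*
Entries are emitted one per line as RRGGBB hex via ScaleQuantumToChar.
*/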
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
image->colors);
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) image->colors; i++)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%02X%02X%02X\n",
ScaleQuantumToChar(image->colormap[i].red),
ScaleQuantumToChar(image->colormap[i].green),
ScaleQuantumToChar(image->colormap[i].blue));
(void) WriteBlobString(image,buffer);
}
switch (compression)
{
case RLECompression:
{
/*
Dump runlength-encoded PseudoColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
index=GetPixelIndex(indexes);
length=255;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((index == GetPixelIndex(indexes+x)) &&
(length < 255) && (x < ((ssize_t) image->columns-1)))
length++;
else
{
if (x > 0)
{
q=PopHexPixel(hex_digits,(size_t) index,q);
q=PopHexPixel(hex_digits,(size_t)
MagickMin(length,0xff),q);
i++;
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
length=0;
}
index=GetPixelIndex(indexes+x);
pixel.red=GetPixelRed(p);
pixel.green=GetPixelGreen(p);
pixel.blue=GetPixelBlue(p);
pixel.opacity=GetPixelOpacity(p);
p++;
}
q=PopHexPixel(hex_digits,(size_t) index,q);
q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q);
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case NoCompression:
default:
{
/*
Dump uncompressed PseudoColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
q=PopHexPixel(hex_digits,(size_t) GetPixelIndex(
indexes+x),q);
if ((q-pixels+4) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
}
(void) WriteBlobByte(image,'\n');
}
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"end\n");
(void) WriteBlobString(image,"%%PageTrailer\n");
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) WriteBlobString(image,"%%Trailer\n");
if (page > 2)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5),
ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,
bounds.x2,bounds.y2);
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"%%EOF\n");
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_910_0 |
crossvul-cpp_data_bad_5627_0 | /*
* drivers/net/veth.c
*
* Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
*
* Author: Pavel Emelianov <xemul@openvz.org>
* Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
*
*/
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
#define DRV_NAME "veth"
#define DRV_VERSION "1.0"
#define MIN_MTU 68 /* Min L3 MTU */
#define MAX_MTU 65535 /* Max L3 MTU (arbitrary) */
#define MTU_PAD (ETH_HLEN + 4) /* Max difference between L2 and L3 size MTU */
struct veth_net_stats {
unsigned long rx_packets;
unsigned long tx_packets;
unsigned long rx_bytes;
unsigned long tx_bytes;
unsigned long tx_dropped;
unsigned long rx_dropped;
};
struct veth_priv {
struct net_device *peer;
struct veth_net_stats __percpu *stats;
unsigned ip_summed;
};
/*
* ethtool interface
*/
static struct {
const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
{ "peer_ifindex" },
};
static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
cmd->supported = 0;
cmd->advertising = 0;
cmd->speed = SPEED_10000;
cmd->duplex = DUPLEX_FULL;
cmd->port = PORT_TP;
cmd->phy_address = 0;
cmd->transceiver = XCVR_INTERNAL;
cmd->autoneg = AUTONEG_DISABLE;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
}
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
strcpy(info->fw_version, "N/A");
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch(stringset) {
case ETH_SS_STATS:
memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
break;
}
}
static int veth_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(ethtool_stats_keys);
default:
return -EOPNOTSUPP;
}
}
static void veth_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
data[0] = priv->peer->ifindex;
}
static u32 veth_get_rx_csum(struct net_device *dev)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
return priv->ip_summed == CHECKSUM_UNNECESSARY;
}
static int veth_set_rx_csum(struct net_device *dev, u32 data)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
priv->ip_summed = data ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
return 0;
}
static u32 veth_get_tx_csum(struct net_device *dev)
{
return (dev->features & NETIF_F_NO_CSUM) != 0;
}
static int veth_set_tx_csum(struct net_device *dev, u32 data)
{
if (data)
dev->features |= NETIF_F_NO_CSUM;
else
dev->features &= ~NETIF_F_NO_CSUM;
return 0;
}
static const struct ethtool_ops veth_ethtool_ops = {
.get_settings = veth_get_settings,
.get_drvinfo = veth_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_rx_csum = veth_get_rx_csum,
.set_rx_csum = veth_set_rx_csum,
.get_tx_csum = veth_get_tx_csum,
.set_tx_csum = veth_set_tx_csum,
.get_sg = ethtool_op_get_sg,
.set_sg = ethtool_op_set_sg,
.get_strings = veth_get_strings,
.get_sset_count = veth_get_sset_count,
.get_ethtool_stats = veth_get_ethtool_stats,
};
/*
* xmit
*/
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct net_device *rcv = NULL;
struct veth_priv *priv, *rcv_priv;
struct veth_net_stats *stats, *rcv_stats;
int length;
priv = netdev_priv(dev);
rcv = priv->peer;
rcv_priv = netdev_priv(rcv);
stats = this_cpu_ptr(priv->stats);
rcv_stats = this_cpu_ptr(rcv_priv->stats);
if (!(rcv->flags & IFF_UP))
goto tx_drop;
if (dev->features & NETIF_F_NO_CSUM)
skb->ip_summed = rcv_priv->ip_summed;
length = skb->len + ETH_HLEN;
if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
goto rx_drop;
stats->tx_bytes += length;
stats->tx_packets++;
rcv_stats->rx_bytes += length;
rcv_stats->rx_packets++;
return NETDEV_TX_OK;
tx_drop:
kfree_skb(skb);
stats->tx_dropped++;
return NETDEV_TX_OK;
rx_drop:
kfree_skb(skb);
rcv_stats->rx_dropped++;
return NETDEV_TX_OK;
}
/*
* general routines
*/
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
struct veth_priv *priv;
int cpu;
struct veth_net_stats *stats, total = {0};
priv = netdev_priv(dev);
for_each_possible_cpu(cpu) {
stats = per_cpu_ptr(priv->stats, cpu);
total.rx_packets += stats->rx_packets;
total.tx_packets += stats->tx_packets;
total.rx_bytes += stats->rx_bytes;
total.tx_bytes += stats->tx_bytes;
total.tx_dropped += stats->tx_dropped;
total.rx_dropped += stats->rx_dropped;
}
dev->stats.rx_packets = total.rx_packets;
dev->stats.tx_packets = total.tx_packets;
dev->stats.rx_bytes = total.rx_bytes;
dev->stats.tx_bytes = total.tx_bytes;
dev->stats.tx_dropped = total.tx_dropped;
dev->stats.rx_dropped = total.rx_dropped;
return &dev->stats;
}
static int veth_open(struct net_device *dev)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
if (priv->peer == NULL)
return -ENOTCONN;
if (priv->peer->flags & IFF_UP) {
netif_carrier_on(dev);
netif_carrier_on(priv->peer);
}
return 0;
}
static int veth_close(struct net_device *dev)
{
struct veth_priv *priv = netdev_priv(dev);
netif_carrier_off(dev);
netif_carrier_off(priv->peer);
return 0;
}
static int is_valid_veth_mtu(int new_mtu)
{
return (new_mtu >= MIN_MTU && new_mtu <= MAX_MTU);
}
static int veth_change_mtu(struct net_device *dev, int new_mtu)
{
if (!is_valid_veth_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
}
static int veth_dev_init(struct net_device *dev)
{
struct veth_net_stats __percpu *stats;
struct veth_priv *priv;
stats = alloc_percpu(struct veth_net_stats);
if (stats == NULL)
return -ENOMEM;
priv = netdev_priv(dev);
priv->stats = stats;
return 0;
}
static void veth_dev_free(struct net_device *dev)
{
struct veth_priv *priv;
priv = netdev_priv(dev);
free_percpu(priv->stats);
free_netdev(dev);
}
static const struct net_device_ops veth_netdev_ops = {
.ndo_init = veth_dev_init,
.ndo_open = veth_open,
.ndo_stop = veth_close,
.ndo_start_xmit = veth_xmit,
.ndo_change_mtu = veth_change_mtu,
.ndo_get_stats = veth_get_stats,
.ndo_set_mac_address = eth_mac_addr,
};
static void veth_setup(struct net_device *dev)
{
ether_setup(dev);
dev->netdev_ops = &veth_netdev_ops;
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
}
/*
* netlink interface
*/
static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
if (tb[IFLA_ADDRESS]) {
if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
return -EINVAL;
if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
return -EADDRNOTAVAIL;
}
if (tb[IFLA_MTU]) {
if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
return -EINVAL;
}
return 0;
}
static struct rtnl_link_ops veth_link_ops;
static int veth_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
int err;
struct net_device *peer;
struct veth_priv *priv;
char ifname[IFNAMSIZ];
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
struct ifinfomsg *ifmp;
struct net *net;
/*
* create and register peer first
*/
if (data != NULL && data[VETH_INFO_PEER] != NULL) {
struct nlattr *nla_peer;
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
err = nla_parse(peer_tb, IFLA_MAX,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
nla_len(nla_peer) - sizeof(struct ifinfomsg),
ifla_policy);
if (err < 0)
return err;
err = veth_validate(peer_tb, NULL);
if (err < 0)
return err;
tbp = peer_tb;
} else {
ifmp = NULL;
tbp = tb;
}
if (tbp[IFLA_IFNAME])
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
net = rtnl_link_get_net(src_net, tbp);
if (IS_ERR(net))
return PTR_ERR(net);
peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
if (IS_ERR(peer)) {
put_net(net);
return PTR_ERR(peer);
}
if (tbp[IFLA_ADDRESS] == NULL)
random_ether_addr(peer->dev_addr);
err = register_netdevice(peer);
put_net(net);
net = NULL;
if (err < 0)
goto err_register_peer;
netif_carrier_off(peer);
err = rtnl_configure_link(peer, ifmp);
if (err < 0)
goto err_configure_peer;
/*
* register dev last
*
* note that since we've registered a new device, the dev's name
* should be re-allocated
*/
if (tb[IFLA_ADDRESS] == NULL)
random_ether_addr(dev->dev_addr);
if (tb[IFLA_IFNAME])
nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
else
snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");
if (strchr(dev->name, '%')) {
err = dev_alloc_name(dev, dev->name);
if (err < 0)
goto err_alloc_name;
}
err = register_netdevice(dev);
if (err < 0)
goto err_register_dev;
netif_carrier_off(dev);
/*
* tie the devices together
*/
priv = netdev_priv(dev);
priv->peer = peer;
priv = netdev_priv(peer);
priv->peer = dev;
return 0;
err_register_dev:
/* nothing to do */
err_alloc_name:
err_configure_peer:
unregister_netdevice(peer);
return err;
err_register_peer:
free_netdev(peer);
return err;
}
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
struct veth_priv *priv;
struct net_device *peer;
priv = netdev_priv(dev);
peer = priv->peer;
unregister_netdevice_queue(dev, head);
unregister_netdevice_queue(peer, head);
}
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
.priv_size = sizeof(struct veth_priv),
.setup = veth_setup,
.validate = veth_validate,
.newlink = veth_newlink,
.dellink = veth_dellink,
.policy = veth_policy,
.maxtype = VETH_INFO_MAX,
};
/*
* init/fini
*/
static __init int veth_init(void)
{
return rtnl_link_register(&veth_link_ops);
}
static __exit void veth_exit(void)
{
rtnl_link_unregister(&veth_link_ops);
}
module_init(veth_init);
module_exit(veth_exit);
MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5627_0 |
crossvul-cpp_data_bad_910_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% PPPP SSSSS %
% P P SS %
% PPPP SSS %
% P SS %
% P SSSSS %
% %
% %
% Read/Write Postscript Format %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/constitute.h"
#include "magick/delegate.h"
#include "magick/delegate-private.h"
#include "magick/draw.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/geometry.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/nt-base-private.h"
#include "magick/option.h"
#include "magick/profile.h"
#include "magick/pixel-accessor.h"
#include "magick/pixel-private.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/timer-private.h"
#include "magick/token.h"
#include "magick/transform.h"
#include "magick/utility.h"
/*
Forward declarations.
*/
static MagickBooleanType
WritePSImage(const ImageInfo *,Image *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n v o k e P o s t s c r i p t D e l e g a t e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InvokePostscriptDelegate() executes the Postscript interpreter with the
% specified command.
%
% The format of the InvokePostscriptDelegate method is:
%
% MagickBooleanType InvokePostscriptDelegate(
% const MagickBooleanType verbose,const char *command,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o verbose: A value other than zero displays the command prior to
% executing it.
%
% o command: the address of a character string containing the command to
% execute.
%
% o exception: return any errors or warnings in this structure.
%
*/
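/*
Hedged usage sketch (illustrative only): the command string and filenames
below are assumptions, not values this coder emits verbatim, and the
caller is assumed to already hold an ExceptionInfo named exception.

char
message[MaxTextExtent];

MagickBooleanType
rendered;

*message='\0';
rendered=InvokePostscriptDelegate(MagickTrue,
"\"gs\" -q -dBATCH -dNOPAUSE -sDEVICE=pnmraw "
"-sOutputFile=example.pnm example.ps",message,exception);
if (rendered == MagickFalse)
(void) fprintf(stderr,"render failed: %s\n",message);
*/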
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
static int MagickDLLCall PostscriptDelegateMessage(void *handle,
const char *message,int length)
{
char
**messages;
ssize_t
offset;
offset=0;
messages=(char **) handle;
if (*messages == (char *) NULL)
*messages=(char *) AcquireQuantumMemory(length+1,sizeof(char *));
else
{
offset=strlen(*messages);
*messages=(char *) ResizeQuantumMemory(*messages,offset+length+1,
sizeof(char *));
}
if (*messages == (char *) NULL)
return(0);
(void) memcpy(*messages+offset,message,length);
(*messages)[length+offset] ='\0';
return(length);
}
#endif
static MagickBooleanType InvokePostscriptDelegate(
const MagickBooleanType verbose,const char *command,char *message,
ExceptionInfo *exception)
{
int
status;
#if defined(MAGICKCORE_GS_DELEGATE) || defined(MAGICKCORE_WINDOWS_SUPPORT)
#define SetArgsStart(command,args_start) \
if (args_start == (const char *) NULL) \
{ \
if (*command != '"') \
args_start=strchr(command,' '); \
else \
{ \
args_start=strchr(command+1,'"'); \
if (args_start != (const char *) NULL) \
args_start++; \
} \
}
#define ExecuteGhostscriptCommand(command,status) \
{ \
status=ExternalDelegateCommand(MagickFalse,verbose,command,message, \
exception); \
if (status == 0) \
return(MagickTrue); \
if (status < 0) \
return(MagickFalse); \
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError, \
"FailedToExecuteCommand","`%s' (%d)",command,status); \
return(MagickFalse); \
}
char
**argv,
*errors;
const char
*args_start = (const char *) NULL;
const GhostInfo
*ghost_info;
gs_main_instance
*interpreter;
gsapi_revision_t
revision;
int
argc,
code;
register ssize_t
i;
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
ghost_info=NTGhostscriptDLLVectors();
#else
GhostInfo
ghost_info_struct;
ghost_info=(&ghost_info_struct);
(void) memset(&ghost_info_struct,0,sizeof(ghost_info_struct));
ghost_info_struct.delete_instance=(void (*)(gs_main_instance *))
gsapi_delete_instance;
ghost_info_struct.exit=(int (*)(gs_main_instance *)) gsapi_exit;
ghost_info_struct.new_instance=(int (*)(gs_main_instance **,void *))
gsapi_new_instance;
ghost_info_struct.init_with_args=(int (*)(gs_main_instance *,int,char **))
gsapi_init_with_args;
ghost_info_struct.run_string=(int (*)(gs_main_instance *,const char *,int,
int *)) gsapi_run_string;
ghost_info_struct.set_stdio=(int (*)(gs_main_instance *,int (*)(void *,char *,
int),int (*)(void *,const char *,int),int (*)(void *, const char *, int)))
gsapi_set_stdio;
ghost_info_struct.revision=(int (*)(gsapi_revision_t *,int)) gsapi_revision;
#endif
if (ghost_info == (GhostInfo *) NULL)
ExecuteGhostscriptCommand(command,status);
if ((ghost_info->revision)(&revision,sizeof(revision)) != 0)
revision.revision=0;
if (verbose != MagickFalse)
{
(void) fprintf(stdout,"[ghostscript library %.2f]",(double)
revision.revision/100.0);
SetArgsStart(command,args_start);
(void) fputs(args_start,stdout);
}
interpreter=(gs_main_instance *) NULL;
errors=(char *) NULL;
status=(ghost_info->new_instance)(&interpreter,(void *) &errors);
if (status < 0)
ExecuteGhostscriptCommand(command,status);
code=0;
argv=StringToArgv(command,&argc);
if (argv == (char **) NULL)
{
(ghost_info->delete_instance)(interpreter);
return(MagickFalse);
}
(void) (ghost_info->set_stdio)(interpreter,(int (MagickDLLCall *)(void *,
char *,int)) NULL,PostscriptDelegateMessage,PostscriptDelegateMessage);
status=(ghost_info->init_with_args)(interpreter,argc-1,argv+1);
if (status == 0)
status=(ghost_info->run_string)(interpreter,"systemdict /start get exec\n",
0,&code);
(ghost_info->exit)(interpreter);
(ghost_info->delete_instance)(interpreter);
for (i=0; i < (ssize_t) argc; i++)
argv[i]=DestroyString(argv[i]);
argv=(char **) RelinquishMagickMemory(argv);
if (status != 0)
{
SetArgsStart(command,args_start);
if (status == -101) /* quit */
(void) FormatLocaleString(message,MaxTextExtent,
"[ghostscript library %.2f]%s: %s",(double) revision.revision/100.0,
args_start,errors);
else
{
(void) ThrowMagickException(exception,GetMagickModule(),
DelegateError,"PostscriptDelegateFailed",
"`[ghostscript library %.2f]%s': %s",(double) revision.revision/
100.0,args_start,errors);
if (errors != (char *) NULL)
errors=DestroyString(errors);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Ghostscript returns status %d, exit code %d",status,code);
return(MagickFalse);
}
}
if (errors != (char *) NULL)
errors=DestroyString(errors);
return(MagickTrue);
#else
status=ExternalDelegateCommand(MagickFalse,verbose,command,(char *) NULL,
exception);
return(status == 0 ? MagickTrue : MagickFalse);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s P S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsPS() returns MagickTrue if the image format type, identified by the
% magick string, is PS.
%
% The format of the IsPS method is:
%
% MagickBooleanType IsPS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
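/*
Illustrative check (the buffer contents are an assumption):

static const unsigned char
signature[] = "%!PS-Adobe-3.0";

if (IsPS(signature,sizeof(signature)-1) != MagickFalse)
(void) puts("header bytes match PostScript");
*/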
static MagickBooleanType IsPS(const unsigned char *magick,const size_t length)
{
if (length < 4)
return(MagickFalse);
if (memcmp(magick,"%!",2) == 0)
return(MagickTrue);
if (memcmp(magick,"\004%!",3) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPSImage() reads a Postscript image file and returns it. It allocates
% the memory necessary for the new Image structure and returns a pointer
% to the new image.
%
% The format of the ReadPSImage method is:
%
% Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
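/*
Hedged usage sketch: ReadPSImage() is static to this module, so external
callers reach it through ReadImage(). The filename is an illustrative
assumption:

ExceptionInfo
*exception;

Image
*image;

ImageInfo
*image_info;

exception=AcquireExceptionInfo();
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,"document.ps",MaxTextExtent);
image=ReadImage(image_info,exception);
if (image == (Image *) NULL)
CatchException(exception);
else
image=DestroyImageList(image);
image_info=DestroyImageInfo(image_info);
exception=DestroyExceptionInfo(exception);
*/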
static MagickBooleanType IsPostscriptRendered(const char *path)
{
MagickBooleanType
status;
struct stat
attributes;
if ((path == (const char *) NULL) || (*path == '\0'))
return(MagickFalse);
status=GetPathAttributes(path,&attributes);
if ((status != MagickFalse) && S_ISREG(attributes.st_mode) &&
(attributes.st_size > 0))
return(MagickTrue);
return(MagickFalse);
}
static inline int ProfileInteger(Image *image,short int *hex_digits)
{
int
c,
l,
value;
register ssize_t
i;
l=0;
value=0;
for (i=0; i < 2; )
{
c=ReadBlobByte(image);
if ((c == EOF) || ((c == '%') && (l == '%')))
{
value=(-1);
break;
}
l=c;
c&=0xff;
if (isxdigit(c) == MagickFalse)
continue;
value=(int) ((size_t) value << 4)+hex_digits[c];
i++;
}
return(value);
}
static Image *ReadPSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
#define BoundingBox "BoundingBox:"
#define BeginDocument "BeginDocument:"
#define BeginXMPPacket "<?xpacket begin="
#define EndXMPPacket "<?xpacket end="
#define ICCProfile "BeginICCProfile:"
#define CMYKCustomColor "CMYKCustomColor:"
#define CMYKProcessColor "CMYKProcessColor:"
#define DocumentMedia "DocumentMedia:"
#define DocumentCustomColors "DocumentCustomColors:"
#define DocumentProcessColors "DocumentProcessColors:"
#define EndDocument "EndDocument:"
#define HiResBoundingBox "HiResBoundingBox:"
#define ImageData "ImageData:"
#define PageBoundingBox "PageBoundingBox:"
#define LanguageLevel "LanguageLevel:"
#define PageMedia "PageMedia:"
#define Pages "Pages:"
#define PhotoshopProfile "BeginPhotoshop:"
#define PostscriptLevel "!PS-"
#define RenderPostscriptText " Rendering Postscript... "
#define SpotColor "+ "
char
command[MaxTextExtent],
*density,
filename[MaxTextExtent],
geometry[MaxTextExtent],
input_filename[MaxTextExtent],
message[MaxTextExtent],
*options,
postscript_filename[MaxTextExtent];
const char
*option;
const DelegateInfo
*delegate_info;
GeometryInfo
geometry_info;
Image
*image,
*next,
*postscript_image;
ImageInfo
*read_info;
int
c,
file;
MagickBooleanType
cmyk,
fitPage,
skip,
status;
MagickStatusType
flags;
PointInfo
delta,
resolution;
RectangleInfo
page;
register char
*p;
register ssize_t
i;
SegmentInfo
bounds,
hires_bounds;
short int
hex_digits[256];
size_t
length,
priority;
ssize_t
count;
StringInfo
*profile;
unsigned long
columns,
extent,
language_level,
pages,
rows,
scene,
spotcolor;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
status=AcquireUniqueSymbolicLink(image_info->filename,input_filename);
if (status == MagickFalse)
{
ThrowFileException(exception,FileOpenError,"UnableToCreateTemporaryFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize hex values.
*/
(void) memset(hex_digits,0,sizeof(hex_digits));
hex_digits[(int) '0']=0;
hex_digits[(int) '1']=1;
hex_digits[(int) '2']=2;
hex_digits[(int) '3']=3;
hex_digits[(int) '4']=4;
hex_digits[(int) '5']=5;
hex_digits[(int) '6']=6;
hex_digits[(int) '7']=7;
hex_digits[(int) '8']=8;
hex_digits[(int) '9']=9;
hex_digits[(int) 'a']=10;
hex_digits[(int) 'b']=11;
hex_digits[(int) 'c']=12;
hex_digits[(int) 'd']=13;
hex_digits[(int) 'e']=14;
hex_digits[(int) 'f']=15;
hex_digits[(int) 'A']=10;
hex_digits[(int) 'B']=11;
hex_digits[(int) 'C']=12;
hex_digits[(int) 'D']=13;
hex_digits[(int) 'E']=14;
hex_digits[(int) 'F']=15;
/*
Set the page density.
*/
delta.x=DefaultResolution;
delta.y=DefaultResolution;
if ((image->x_resolution == 0.0) || (image->y_resolution == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
(void) ParseAbsoluteGeometry(PSPageGeometry,&page);
if (image_info->page != (char *) NULL)
(void) ParseAbsoluteGeometry(image_info->page,&page);
resolution.x=image->x_resolution;
resolution.y=image->y_resolution;
page.width=(size_t) ceil((double) (page.width*resolution.x/delta.x)-0.5);
page.height=(size_t) ceil((double) (page.height*resolution.y/delta.y)-0.5);
/*
Determine page geometry from the Postscript bounding box.
*/
(void) memset(&bounds,0,sizeof(bounds));
(void) memset(command,0,sizeof(command));
cmyk=image_info->colorspace == CMYKColorspace ? MagickTrue : MagickFalse;
(void) memset(&hires_bounds,0,sizeof(hires_bounds));
priority=0;
columns=0;
rows=0;
extent=0;
spotcolor=0;
language_level=1;
skip=MagickFalse;
pages=(~0UL);
p=command;
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note document structuring comments.
*/
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
/*
Skip %%BeginDocument thru %%EndDocument.
*/
if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0)
skip=MagickTrue;
if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0)
skip=MagickFalse;
if (skip != MagickFalse)
continue;
if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0)
{
(void) SetImageProperty(image,"ps:Level",command+4);
if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse)
pages=1;
}
if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0)
(void) sscanf(command,LanguageLevel " %lu",&language_level);
if (LocaleNCompare(Pages,command,strlen(Pages)) == 0)
(void) sscanf(command,Pages " %lu",&pages);
if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0)
(void) sscanf(command,ImageData " %lu %lu",&columns,&rows);
/*
Is this a CMYK document?
*/
length=strlen(DocumentProcessColors);
if (LocaleNCompare(DocumentProcessColors,command,length) == 0)
{
if ((GlobExpression(command,"*Cyan*",MagickTrue) != MagickFalse) ||
(GlobExpression(command,"*Magenta*",MagickTrue) != MagickFalse) ||
(GlobExpression(command,"*Yellow*",MagickTrue) != MagickFalse))
cmyk=MagickTrue;
}
if (LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0)
cmyk=MagickTrue;
if (LocaleNCompare(CMYKProcessColor,command,strlen(CMYKProcessColor)) == 0)
cmyk=MagickTrue;
length=strlen(DocumentCustomColors);
if ((LocaleNCompare(DocumentCustomColors,command,length) == 0) ||
(LocaleNCompare(CMYKCustomColor,command,strlen(CMYKCustomColor)) == 0) ||
(LocaleNCompare(SpotColor,command,strlen(SpotColor)) == 0))
{
char
property[MaxTextExtent],
*value;
register char
*p;
/*
Note spot names.
*/
(void) FormatLocaleString(property,MaxTextExtent,"ps:SpotColor-%.20g",
(double) (spotcolor++));
for (p=command; *p != '\0'; p++)
if (isspace((int) (unsigned char) *p) != 0)
break;
value=ConstantString(p);
(void) SubstituteString(&value,"(","");
(void) SubstituteString(&value,")","");
(void) StripString(value);
if (*value != '\0')
(void) SetImageProperty(image,property,value);
value=DestroyString(value);
continue;
}
if (image_info->page != (char *) NULL)
continue;
/*
Note region defined by bounding box.
*/
count=0;
i=0;
if (LocaleNCompare(BoundingBox,command,strlen(BoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,BoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=2;
}
if (LocaleNCompare(DocumentMedia,command,strlen(DocumentMedia)) == 0)
{
count=(ssize_t) sscanf(command,DocumentMedia " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if (LocaleNCompare(HiResBoundingBox,command,strlen(HiResBoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,HiResBoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=3;
}
if (LocaleNCompare(PageBoundingBox,command,strlen(PageBoundingBox)) == 0)
{
count=(ssize_t) sscanf(command,PageBoundingBox " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if (LocaleNCompare(PageMedia,command,strlen(PageMedia)) == 0)
{
count=(ssize_t) sscanf(command,PageMedia " %lf %lf %lf %lf",
&bounds.x1,&bounds.y1,&bounds.x2,&bounds.y2);
i=1;
}
if ((count != 4) || (i < (ssize_t) priority))
continue;
if ((fabs(bounds.x2-bounds.x1) <= fabs(hires_bounds.x2-hires_bounds.x1)) ||
(fabs(bounds.y2-bounds.y1) <= fabs(hires_bounds.y2-hires_bounds.y1)))
if (i == (ssize_t) priority)
continue;
hires_bounds=bounds;
priority=(size_t) i;
}
if ((fabs(hires_bounds.x2-hires_bounds.x1) >= MagickEpsilon) &&
(fabs(hires_bounds.y2-hires_bounds.y1) >= MagickEpsilon))
{
/*
Set Postscript render geometry.
*/
(void) FormatLocaleString(geometry,MaxTextExtent,"%gx%g%+.15g%+.15g",
hires_bounds.x2-hires_bounds.x1,hires_bounds.y2-hires_bounds.y1,
hires_bounds.x1,hires_bounds.y1);
(void) SetImageProperty(image,"ps:HiResBoundingBox",geometry);
page.width=(size_t) ceil((double) ((hires_bounds.x2-hires_bounds.x1)*
resolution.x/delta.x)-0.5);
page.height=(size_t) ceil((double) ((hires_bounds.y2-hires_bounds.y1)*
resolution.y/delta.y)-0.5);
}
fitPage=MagickFalse;
option=GetImageOption(image_info,"eps:fit-page");
if (option != (char *) NULL)
{
char
*geometry;
MagickStatusType
flags;
geometry=GetPageGeometry(option);
flags=ParseMetaGeometry(geometry,&page.x,&page.y,&page.width,
&page.height);
if (flags == NoValue)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidGeometry","`%s'",option);
image=DestroyImage(image);
return((Image *) NULL);
}
page.width=(size_t) ceil((double) (page.width*image->x_resolution/delta.x)
-0.5);
page.height=(size_t) ceil((double) (page.height*image->y_resolution/
delta.y) -0.5);
geometry=DestroyString(geometry);
fitPage=MagickTrue;
}
if (IssRGBCompatibleColorspace(image_info->colorspace) != MagickFalse)
cmyk=MagickFalse;
/*
Create Ghostscript control file.
*/
file=AcquireUniqueFileResource(postscript_filename);
if (file == -1)
{
ThrowFileException(&image->exception,FileOpenError,"UnableToOpenFile",
image_info->filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
(void) CopyMagickString(command,"/setpagedevice {pop} bind 1 index where {"
"dup wcheck {3 1 roll put} {pop def} ifelse} {def} ifelse\n"
"<</UseCIEColor true>>setpagedevice\n",MaxTextExtent);
count=write(file,command,(unsigned int) strlen(command));
if (image_info->page == (char *) NULL)
{
char
translate_geometry[MaxTextExtent];
(void) FormatLocaleString(translate_geometry,MaxTextExtent,
"%g %g translate\n",-hires_bounds.x1,-hires_bounds.y1);
count=write(file,translate_geometry,(unsigned int)
strlen(translate_geometry));
}
file=close(file)-1;
/*
Render Postscript with the Ghostscript delegate.
*/
if (image_info->monochrome != MagickFalse)
delegate_info=GetDelegateInfo("ps:mono",(char *) NULL,exception);
else
if (cmyk != MagickFalse)
delegate_info=GetDelegateInfo("ps:cmyk",(char *) NULL,exception);
else
delegate_info=GetDelegateInfo("ps:alpha",(char *) NULL,exception);
if (delegate_info == (const DelegateInfo *) NULL)
{
(void) RelinquishUniqueFileResource(postscript_filename);
image=DestroyImageList(image);
return((Image *) NULL);
}
density=AcquireString("");
options=AcquireString("");
(void) FormatLocaleString(density,MaxTextExtent,"%gx%g",resolution.x,
resolution.y);
(void) FormatLocaleString(options,MaxTextExtent,"-g%.20gx%.20g ",(double)
page.width,(double) page.height);
read_info=CloneImageInfo(image_info);
*read_info->magick='\0';
if (read_info->number_scenes != 0)
{
char
pages[MaxTextExtent];
(void) FormatLocaleString(pages,MaxTextExtent,"-dFirstPage=%.20g "
"-dLastPage=%.20g ",(double) read_info->scene+1,(double)
(read_info->scene+read_info->number_scenes));
(void) ConcatenateMagickString(options,pages,MaxTextExtent);
read_info->number_scenes=0;
if (read_info->scenes != (char *) NULL)
*read_info->scenes='\0';
}
if (*image_info->magick == 'E')
{
option=GetImageOption(image_info,"eps:use-cropbox");
if ((option == (const char *) NULL) ||
(IsStringTrue(option) != MagickFalse))
(void) ConcatenateMagickString(options,"-dEPSCrop ",MaxTextExtent);
if (fitPage != MagickFalse)
(void) ConcatenateMagickString(options,"-dEPSFitPage ",MaxTextExtent);
}
(void) CopyMagickString(filename,read_info->filename,MaxTextExtent);
(void) AcquireUniqueFilename(filename);
(void) RelinquishUniqueFileResource(filename);
(void) ConcatenateMagickString(filename,"%d",MaxTextExtent);
(void) FormatLocaleString(command,MaxTextExtent,
GetDelegateCommands(delegate_info),
read_info->antialias != MagickFalse ? 4 : 1,
read_info->antialias != MagickFalse ? 4 : 1,density,options,filename,
postscript_filename,input_filename);
options=DestroyString(options);
density=DestroyString(density);
*message='\0';
status=InvokePostscriptDelegate(read_info->verbose,command,message,exception);
(void) InterpretImageFilename(image_info,image,filename,1,
read_info->filename);
if ((status == MagickFalse) ||
(IsPostscriptRendered(read_info->filename) == MagickFalse))
{
(void) ConcatenateMagickString(command," -c showpage",MaxTextExtent);
status=InvokePostscriptDelegate(read_info->verbose,command,message,
exception);
}
(void) RelinquishUniqueFileResource(postscript_filename);
(void) RelinquishUniqueFileResource(input_filename);
postscript_image=(Image *) NULL;
if (status == MagickFalse)
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename);
if (IsPostscriptRendered(read_info->filename) == MagickFalse)
break;
(void) RelinquishUniqueFileResource(read_info->filename);
}
else
for (i=1; ; i++)
{
(void) InterpretImageFilename(image_info,image,filename,(int) i,
read_info->filename);
if (IsPostscriptRendered(read_info->filename) == MagickFalse)
break;
read_info->blob=NULL;
read_info->length=0;
next=ReadImage(read_info,exception);
(void) RelinquishUniqueFileResource(read_info->filename);
if (next == (Image *) NULL)
break;
AppendImageToList(&postscript_image,next);
}
(void) RelinquishUniqueFileResource(read_info->filename);
read_info=DestroyImageInfo(read_info);
if (postscript_image == (Image *) NULL)
{
if (*message != '\0')
(void) ThrowMagickException(exception,GetMagickModule(),DelegateError,
"PostscriptDelegateFailed","`%s'",message);
image=DestroyImageList(image);
return((Image *) NULL);
}
if (LocaleCompare(postscript_image->magick,"BMP") == 0)
{
Image
*cmyk_image;
cmyk_image=ConsolidateCMYKImages(postscript_image,exception);
if (cmyk_image != (Image *) NULL)
{
postscript_image=DestroyImageList(postscript_image);
postscript_image=cmyk_image;
}
}
(void) SeekBlob(image,0,SEEK_SET);
for (c=ReadBlobByte(image); c != EOF; c=ReadBlobByte(image))
{
/*
Note document structuring comments.
*/
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
/*
Skip %%BeginDocument thru %%EndDocument.
*/
if (LocaleNCompare(BeginDocument,command,strlen(BeginDocument)) == 0)
skip=MagickTrue;
if (LocaleNCompare(EndDocument,command,strlen(EndDocument)) == 0)
skip=MagickFalse;
if (skip != MagickFalse)
continue;
if (LocaleNCompare(PostscriptLevel,command,strlen(PostscriptLevel)) == 0)
{
(void) SetImageProperty(image,"ps:Level",command+4);
if (GlobExpression(command,"*EPSF-*",MagickTrue) != MagickFalse)
pages=1;
}
if (LocaleNCompare(LanguageLevel,command,strlen(LanguageLevel)) == 0)
(void) sscanf(command,LanguageLevel " %lu",&language_level);
if (LocaleNCompare(Pages,command,strlen(Pages)) == 0)
(void) sscanf(command,Pages " %lu",&pages);
if (LocaleNCompare(ImageData,command,strlen(ImageData)) == 0)
(void) sscanf(command,ImageData " %lu %lu",&columns,&rows);
if (LocaleNCompare(ICCProfile,command,strlen(ICCProfile)) == 0)
{
unsigned char
*datum;
/*
Read ICC profile.
*/
profile=AcquireStringInfo(MaxTextExtent);
datum=GetStringInfoDatum(profile);
for (i=0; (c=ProfileInteger(image,hex_digits)) != EOF; i++)
{
if (i >= (ssize_t) GetStringInfoLength(profile))
{
SetStringInfoLength(profile,(size_t) i << 1);
datum=GetStringInfoDatum(profile);
}
datum[i]=(unsigned char) c;
}
SetStringInfoLength(profile,(size_t) i+1);
(void) SetImageProfile(image,"icc",profile);
profile=DestroyStringInfo(profile);
continue;
}
if (LocaleNCompare(PhotoshopProfile,command,strlen(PhotoshopProfile)) == 0)
{
unsigned char
*p;
/*
Read Photoshop profile.
*/
count=(ssize_t) sscanf(command,PhotoshopProfile " %lu",&extent);
if (count != 1)
continue;
length=extent;
if ((MagickSizeType) length > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
profile=BlobToStringInfo((const void *) NULL,length);
if (profile != (StringInfo *) NULL)
{
p=GetStringInfoDatum(profile);
for (i=0; i < (ssize_t) length; i++)
*p++=(unsigned char) ProfileInteger(image,hex_digits);
(void) SetImageProfile(image,"8bim",profile);
profile=DestroyStringInfo(profile);
}
continue;
}
if (LocaleNCompare(BeginXMPPacket,command,strlen(BeginXMPPacket)) == 0)
{
register size_t
i;
/*
Read XMP profile.
*/
p=command;
profile=StringToStringInfo(command);
for (i=GetStringInfoLength(profile)-1; c != EOF; i++)
{
SetStringInfoLength(profile,(size_t) (i+1));
c=ReadBlobByte(image);
GetStringInfoDatum(profile)[i]=(unsigned char) c;
*p++=(char) c;
if ((strchr("\n\r%",c) == (char *) NULL) &&
((size_t) (p-command) < (MaxTextExtent-1)))
continue;
*p='\0';
p=command;
if (LocaleNCompare(EndXMPPacket,command,strlen(EndXMPPacket)) == 0)
break;
}
SetStringInfoLength(profile,(size_t) i);
(void) SetImageProfile(image,"xmp",profile);
profile=DestroyStringInfo(profile);
continue;
}
}
(void) CloseBlob(image);
if (image_info->number_scenes != 0)
{
Image
*clone_image;
register ssize_t
i;
/*
Add placeholder images to meet the subimage specification requirement.
*/
for (i=0; i < (ssize_t) image_info->scene; i++)
{
clone_image=CloneImage(postscript_image,1,1,MagickTrue,exception);
if (clone_image != (Image *) NULL)
PrependImageToList(&postscript_image,clone_image);
}
}
do
{
(void) CopyMagickString(postscript_image->filename,filename,MaxTextExtent);
(void) CopyMagickString(postscript_image->magick,image->magick,
MaxTextExtent);
if (columns != 0)
postscript_image->magick_columns=columns;
if (rows != 0)
postscript_image->magick_rows=rows;
postscript_image->page=page;
(void) CloneImageProfiles(postscript_image,image);
(void) CloneImageProperties(postscript_image,image);
next=SyncNextImageInList(postscript_image);
if (next != (Image *) NULL)
postscript_image=next;
} while (next != (Image *) NULL);
image=DestroyImageList(image);
scene=0;
for (next=GetFirstImageInList(postscript_image); next != (Image *) NULL; )
{
next->scene=scene++;
next=GetNextImageInList(next);
}
return(GetFirstImageInList(postscript_image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterPSImage() adds properties for the PS image format to
% the list of supported formats. The properties include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterPSImage method is:
%
% size_t RegisterPSImage(void)
%
*/
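/*
Conceptual sketch (the call site is an assumption about typical module
loading, not code from this file): the module framework invokes

(void) RegisterPSImage();

after which GetMagickInfo("EPS",exception) and friends resolve to this
coder.
*/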
ModuleExport size_t RegisterPSImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("EPI");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->seekable_stream=MagickTrue;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(
"Encapsulated PostScript Interchange format");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPS");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Encapsulated PostScript");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPSF");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString("Encapsulated PostScript");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("EPSI");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->adjoin=MagickFalse;
entry->blob_support=MagickFalse;
entry->seekable_stream=MagickTrue;
entry->description=ConstantString(
"Encapsulated PostScript Interchange format");
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
(void) RegisterMagickInfo(entry);
entry=SetMagickInfo("PS");
entry->decoder=(DecodeImageHandler *) ReadPSImage;
entry->encoder=(EncodeImageHandler *) WritePSImage;
entry->seekable_stream=MagickTrue;
entry->magick=(IsImageFormatHandler *) IsPS;
entry->mime_type=ConstantString("application/postscript");
entry->module=ConstantString("PS");
entry->blob_support=MagickFalse;
entry->description=ConstantString("PostScript");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
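/*
  Usage sketch (illustrative only; the entry points named here are the
  standard MagickCore calls, the filename is hypothetical): once this
  module is registered, the core dispatches on the format tag, so callers
  never invoke ReadPSImage() directly:

    ImageInfo *image_info=AcquireImageInfo();
    ExceptionInfo *exception=AcquireExceptionInfo();
    Image *image;

    (void) CopyMagickString(image_info->filename,"figure.eps",MaxTextExtent);
    image=ReadImage(image_info,exception);

  ReadImage() consults the registry built above and calls ReadPSImage for
  the "EPS" tag; UnregisterPSImage() below removes those entries again.
*/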
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterPSImage() removes format registrations made by the
% PS module from the list of supported formats.
%
% The format of the UnregisterPSImage method is:
%
% UnregisterPSImage(void)
%
*/
ModuleExport void UnregisterPSImage(void)
{
(void) UnregisterMagickInfo("EPI");
(void) UnregisterMagickInfo("EPS");
(void) UnregisterMagickInfo("EPSF");
(void) UnregisterMagickInfo("EPSI");
(void) UnregisterMagickInfo("PS");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e P S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePSImage translates an image to encapsulated Postscript
% Level I for printing. If the supplied geometry is null, the image is
% centered on the Postscript page. Otherwise, the image is positioned as
% specified by the geometry.
%
% The format of the WritePSImage method is:
%
% MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
static inline unsigned char *PopHexPixel(const char hex_digits[][3],
const size_t pixel,unsigned char *pixels)
{
register const char
*hex;
hex=hex_digits[pixel];
*pixels++=(unsigned char) (*hex++);
*pixels++=(unsigned char) (*hex);
return(pixels);
}
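/*
  For example, PopHexPixel(hex_digits,0xAB,q) copies the two characters
  'A' and 'B' from hex_digits[0xAB] into the buffer at q and returns the
  advanced pointer.  The writer below leans on this helper to build
  hex-encoded scanlines without a formatted-print call per pixel.
*/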
static MagickBooleanType WritePSImage(const ImageInfo *image_info,Image *image)
{
#define WriteRunlengthPacket(image,pixel,length,p) \
{ \
if ((image->matte != MagickFalse) && (length != 0) &&\
(GetPixelOpacity(p) == (Quantum) TransparentOpacity)) \
{ \
q=PopHexPixel(hex_digits,0xff,q); \
q=PopHexPixel(hex_digits,0xff,q); \
q=PopHexPixel(hex_digits,0xff,q); \
} \
else \
{ \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.red),q); \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.green),q); \
q=PopHexPixel(hex_digits,ScaleQuantumToChar(pixel.blue),q); \
} \
q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q); \
}
static const char
hex_digits[][3] =
{
"00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0A", "0B",
"0C", "0D", "0E", "0F", "10", "11", "12", "13", "14", "15", "16", "17",
"18", "19", "1A", "1B", "1C", "1D", "1E", "1F", "20", "21", "22", "23",
"24", "25", "26", "27", "28", "29", "2A", "2B", "2C", "2D", "2E", "2F",
"30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "3A", "3B",
"3C", "3D", "3E", "3F", "40", "41", "42", "43", "44", "45", "46", "47",
"48", "49", "4A", "4B", "4C", "4D", "4E", "4F", "50", "51", "52", "53",
"54", "55", "56", "57", "58", "59", "5A", "5B", "5C", "5D", "5E", "5F",
"60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "6A", "6B",
"6C", "6D", "6E", "6F", "70", "71", "72", "73", "74", "75", "76", "77",
"78", "79", "7A", "7B", "7C", "7D", "7E", "7F", "80", "81", "82", "83",
"84", "85", "86", "87", "88", "89", "8A", "8B", "8C", "8D", "8E", "8F",
"90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "9A", "9B",
"9C", "9D", "9E", "9F", "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7",
"A8", "A9", "AA", "AB", "AC", "AD", "AE", "AF", "B0", "B1", "B2", "B3",
"B4", "B5", "B6", "B7", "B8", "B9", "BA", "BB", "BC", "BD", "BE", "BF",
"C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9", "CA", "CB",
"CC", "CD", "CE", "CF", "D0", "D1", "D2", "D3", "D4", "D5", "D6", "D7",
"D8", "D9", "DA", "DB", "DC", "DD", "DE", "DF", "E0", "E1", "E2", "E3",
"E4", "E5", "E6", "E7", "E8", "E9", "EA", "EB", "EC", "ED", "EE", "EF",
"F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "FA", "FB",
"FC", "FD", "FE", "FF"
},
PostscriptProlog[] =
"%%BeginProlog\n"
"%\n"
"% Display a color image. The image is displayed in color on\n"
"% Postscript viewers or printers that support color, otherwise\n"
"% it is displayed as grayscale.\n"
"%\n"
"/DirectClassPacket\n"
"{\n"
" %\n"
" % Get a DirectClass packet.\n"
" %\n"
" % Parameters:\n"
" % red.\n"
" % green.\n"
" % blue.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile color_packet readhexstring pop pop\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 3 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add 3 mul def\n"
" } ifelse\n"
" 0 3 number_pixels 1 sub\n"
" {\n"
" pixels exch color_packet putinterval\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/DirectClassImage\n"
"{\n"
" %\n"
" % Display a DirectClass image.\n"
" %\n"
" systemdict /colorimage known\n"
" {\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { DirectClassPacket } false 3 colorimage\n"
" }\n"
" {\n"
" %\n"
" % No colorimage operator; convert to grayscale.\n"
" %\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { GrayDirectClassPacket } image\n"
" } ifelse\n"
"} bind def\n"
"\n"
"/GrayDirectClassPacket\n"
"{\n"
" %\n"
" % Get a DirectClass packet; convert to grayscale.\n"
" %\n"
" % Parameters:\n"
" % red\n"
" % green\n"
" % blue\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile color_packet readhexstring pop pop\n"
" color_packet 0 get 0.299 mul\n"
" color_packet 1 get 0.587 mul add\n"
" color_packet 2 get 0.114 mul add\n"
" cvi\n"
" /gray_packet exch def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 1 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add def\n"
" } ifelse\n"
" 0 1 number_pixels 1 sub\n"
" {\n"
" pixels exch gray_packet put\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/GrayPseudoClassPacket\n"
"{\n"
" %\n"
" % Get a PseudoClass packet; convert to grayscale.\n"
" %\n"
" % Parameters:\n"
" % index: index into the colormap.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile byte readhexstring pop 0 get\n"
" /offset exch 3 mul def\n"
" /color_packet colormap offset 3 getinterval def\n"
" color_packet 0 get 0.299 mul\n"
" color_packet 1 get 0.587 mul add\n"
" color_packet 2 get 0.114 mul add\n"
" cvi\n"
" /gray_packet exch def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 1 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add def\n"
" } ifelse\n"
" 0 1 number_pixels 1 sub\n"
" {\n"
" pixels exch gray_packet put\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/PseudoClassPacket\n"
"{\n"
" %\n"
" % Get a PseudoClass packet.\n"
" %\n"
" % Parameters:\n"
" % index: index into the colormap.\n"
" % length: number of pixels minus one of this color (optional).\n"
" %\n"
" currentfile byte readhexstring pop 0 get\n"
" /offset exch 3 mul def\n"
" /color_packet colormap offset 3 getinterval def\n"
" compression 0 eq\n"
" {\n"
" /number_pixels 3 def\n"
" }\n"
" {\n"
" currentfile byte readhexstring pop 0 get\n"
" /number_pixels exch 1 add 3 mul def\n"
" } ifelse\n"
" 0 3 number_pixels 1 sub\n"
" {\n"
" pixels exch color_packet putinterval\n"
" } for\n"
" pixels 0 number_pixels getinterval\n"
"} bind def\n"
"\n"
"/PseudoClassImage\n"
"{\n"
" %\n"
" % Display a PseudoClass image.\n"
" %\n"
" % Parameters:\n"
" % class: 0-PseudoClass or 1-Grayscale.\n"
" %\n"
" currentfile buffer readline pop\n"
" token pop /class exch def pop\n"
" class 0 gt\n"
" {\n"
" currentfile buffer readline pop\n"
" token pop /depth exch def pop\n"
" /grays columns 8 add depth sub depth mul 8 idiv string def\n"
" columns rows depth\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { currentfile grays readhexstring pop } image\n"
" }\n"
" {\n"
" %\n"
" % Parameters:\n"
" % colors: number of colors in the colormap.\n"
" % colormap: red, green, blue color packets.\n"
" %\n"
" currentfile buffer readline pop\n"
" token pop /colors exch def pop\n"
" /colors colors 3 mul def\n"
" /colormap colors string def\n"
" currentfile colormap readhexstring pop pop\n"
" systemdict /colorimage known\n"
" {\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { PseudoClassPacket } false 3 colorimage\n"
" }\n"
" {\n"
" %\n"
" % No colorimage operator; convert to grayscale.\n"
" %\n"
" columns rows 8\n"
" [\n"
" columns 0 0\n"
" rows neg 0 rows\n"
" ]\n"
" { GrayPseudoClassPacket } image\n"
" } ifelse\n"
" } ifelse\n"
"} bind def\n"
"\n"
"/DisplayImage\n"
"{\n"
" %\n"
" % Display a DirectClass or PseudoClass image.\n"
" %\n"
" % Parameters:\n"
" % x & y translation.\n"
" % x & y scale.\n"
" % label pointsize.\n"
" % image label.\n"
" % image columns & rows.\n"
" % class: 0-DirectClass or 1-PseudoClass.\n"
" % compression: 0-none or 1-RunlengthEncoded.\n"
" % hex color packets.\n"
" %\n"
" gsave\n"
" /buffer 512 string def\n"
" /byte 1 string def\n"
" /color_packet 3 string def\n"
" /pixels 768 string def\n"
"\n"
" currentfile buffer readline pop\n"
" token pop /x exch def\n"
" token pop /y exch def pop\n"
" x y translate\n"
" currentfile buffer readline pop\n"
" token pop /x exch def\n"
" token pop /y exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /pointsize exch def pop\n",
PostscriptEpilog[] =
" x y scale\n"
" currentfile buffer readline pop\n"
" token pop /columns exch def\n"
" token pop /rows exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /class exch def pop\n"
" currentfile buffer readline pop\n"
" token pop /compression exch def pop\n"
" class 0 gt { PseudoClassImage } { DirectClassImage } ifelse\n"
" grestore\n";
char
buffer[MaxTextExtent],
date[MaxTextExtent],
**labels,
page_geometry[MaxTextExtent];
CompressionType
compression;
const char
*value;
const StringInfo
*profile;
double
pointsize;
GeometryInfo
geometry_info;
IndexPacket
index;
MagickBooleanType
status;
MagickOffsetType
scene;
MagickStatusType
flags;
PixelPacket
pixel;
PointInfo
delta,
resolution,
scale;
RectangleInfo
geometry,
media_info,
page_info;
register const IndexPacket
*indexes;
register const PixelPacket
*p;
register ssize_t
i,
x;
register unsigned char
*q;
SegmentInfo
bounds;
size_t
bit,
byte,
imageListLength,
length,
page,
text_size;
ssize_t
j,
y;
time_t
timer;
unsigned char
pixels[2048];
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) memset(&bounds,0,sizeof(bounds));
compression=image->compression;
if (image_info->compression != UndefinedCompression)
compression=image_info->compression;
page=1;
scene=0;
imageListLength=GetImageListLength(image);
do
{
/*
Scale relative to dots-per-inch.
*/
if (image->colorspace != CMYKColorspace)
(void) TransformImageColorspace(image,sRGBColorspace);
delta.x=DefaultResolution;
delta.y=DefaultResolution;
resolution.x=image->x_resolution;
resolution.y=image->y_resolution;
if ((resolution.x == 0.0) || (resolution.y == 0.0))
{
flags=ParseGeometry(PSDensityGeometry,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image_info->density != (char *) NULL)
{
flags=ParseGeometry(image_info->density,&geometry_info);
resolution.x=geometry_info.rho;
resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
resolution.y=resolution.x;
}
if (image->units == PixelsPerCentimeterResolution)
{
resolution.x=(double) ((size_t) (100.0*2.54*resolution.x+0.5)/100.0);
resolution.y=(double) ((size_t) (100.0*2.54*resolution.y+0.5)/100.0);
}
SetGeometry(image,&geometry);
(void) FormatLocaleString(page_geometry,MaxTextExtent,"%.20gx%.20g",
(double) image->columns,(double) image->rows);
if (image_info->page != (char *) NULL)
(void) CopyMagickString(page_geometry,image_info->page,MaxTextExtent);
else
if ((image->page.width != 0) && (image->page.height != 0))
(void) FormatLocaleString(page_geometry,MaxTextExtent,
"%.20gx%.20g%+.20g%+.20g",(double) image->page.width,(double)
image->page.height,(double) image->page.x,(double) image->page.y);
else
if ((image->gravity != UndefinedGravity) &&
(LocaleCompare(image_info->magick,"PS") == 0))
(void) CopyMagickString(page_geometry,PSPageGeometry,MaxTextExtent);
(void) ConcatenateMagickString(page_geometry,">",MaxTextExtent);
(void) ParseMetaGeometry(page_geometry,&geometry.x,&geometry.y,
&geometry.width,&geometry.height);
scale.x=PerceptibleReciprocal(resolution.x)*geometry.width*delta.x;
geometry.width=(size_t) floor(scale.x+0.5);
scale.y=PerceptibleReciprocal(resolution.y)*geometry.height*delta.y;
geometry.height=(size_t) floor(scale.y+0.5);
(void) ParseAbsoluteGeometry(page_geometry,&media_info);
(void) ParseGravityGeometry(image,page_geometry,&page_info,
&image->exception);
if (image->gravity != UndefinedGravity)
{
geometry.x=(-page_info.x);
geometry.y=(ssize_t) (media_info.height+page_info.y-image->rows);
}
pointsize=12.0;
if (image_info->pointsize != 0.0)
pointsize=image_info->pointsize;
text_size=0;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
text_size=(size_t) (MultilineCensus(value)*pointsize+12);
if (page == 1)
{
/*
Output Postscript header.
*/
if (LocaleCompare(image_info->magick,"PS") == 0)
(void) CopyMagickString(buffer,"%!PS-Adobe-3.0\n",MaxTextExtent);
else
(void) CopyMagickString(buffer,"%!PS-Adobe-3.0 EPSF-3.0\n",
MaxTextExtent);
(void) WriteBlobString(image,buffer);
(void) WriteBlobString(image,"%%Creator: (ImageMagick)\n");
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Title: (%s)\n",
image->filename);
(void) WriteBlobString(image,buffer);
timer=GetMagickTime();
(void) FormatMagickTime(timer,MaxTextExtent,date);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%CreationDate: (%s)\n",date);
(void) WriteBlobString(image,buffer);
bounds.x1=(double) geometry.x;
bounds.y1=(double) geometry.y;
bounds.x2=(double) geometry.x+scale.x;
bounds.y2=(double) geometry.y+(geometry.height+text_size);
if ((image_info->adjoin != MagickFalse) &&
(GetNextImageInList(image) != (Image *) NULL))
(void) CopyMagickString(buffer,"%%%%BoundingBox: (atend)\n",
MaxTextExtent);
else
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5),
ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,
bounds.x2,bounds.y2);
}
(void) WriteBlobString(image,buffer);
profile=GetImageProfile(image,"8bim");
if (profile != (StringInfo *) NULL)
{
/*
Embed Photoshop profile.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%BeginPhotoshop: %.20g",(double) GetStringInfoLength(profile));
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
{
if ((i % 32) == 0)
(void) WriteBlobString(image,"\n% ");
(void) FormatLocaleString(buffer,MaxTextExtent,"%02X",
(unsigned int) (GetStringInfoDatum(profile)[i] & 0xff));
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"\n%EndPhotoshop\n");
}
profile=GetImageProfile(image,"xmp");
DisableMSCWarning(4127)
if (0 && (profile != (StringInfo *) NULL))
RestoreMSCWarning
{
/*
Embed XML profile.
*/
(void) WriteBlobString(image,"\n%begin_xml_code\n");
(void) FormatLocaleString(buffer,MaxTextExtent,
"\n%%begin_xml_packet: %.20g\n",(double)
GetStringInfoLength(profile));
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) GetStringInfoLength(profile); i++)
(void) WriteBlobByte(image,GetStringInfoDatum(profile)[i]);
(void) WriteBlobString(image,"\n%end_xml_packet\n%end_xml_code\n");
}
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
(void) WriteBlobString(image,
"%%DocumentNeededResources: font Times-Roman\n");
(void) WriteBlobString(image,"%%DocumentData: Clean7Bit\n");
(void) WriteBlobString(image,"%%LanguageLevel: 1\n");
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"%%Pages: 1\n");
else
{
/*
Compute the number of pages.
*/
(void) WriteBlobString(image,"%%Orientation: Portrait\n");
(void) WriteBlobString(image,"%%PageOrder: Ascend\n");
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Pages: %.20g\n",
image_info->adjoin != MagickFalse ? (double)
GetImageListLength(image) : 1.0);
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"%%EndComments\n");
(void) WriteBlobString(image,"\n%%BeginDefaults\n");
(void) WriteBlobString(image,"%%EndDefaults\n\n");
if ((LocaleCompare(image_info->magick,"EPI") == 0) ||
(LocaleCompare(image_info->magick,"EPSI") == 0) ||
(LocaleCompare(image_info->magick,"EPT") == 0))
{
Image
*preview_image;
Quantum
pixel;
register ssize_t
x;
ssize_t
y;
/*
Create preview image.
*/
preview_image=CloneImage(image,0,0,MagickTrue,&image->exception);
if (preview_image == (Image *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
/*
Dump image as bitmap.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BeginPreview: %.20g %.20g %.20g %.20g\n%% ",(double)
preview_image->columns,(double) preview_image->rows,1.0,
(double) ((((preview_image->columns+7) >> 3)*preview_image->rows+
35)/36));
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(preview_image,0,y,preview_image->columns,1,
&preview_image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(preview_image);
bit=0;
byte=0;
for (x=0; x < (ssize_t) preview_image->columns; x++)
{
byte<<=1;
pixel=ClampToQuantum(GetPixelLuma(preview_image,p));
if (pixel >= (Quantum) (QuantumRange/2))
byte|=0x01;
bit++;
if (bit == 8)
{
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
(void) WriteBlobString(image,"% ");
}
bit=0;
byte=0;
}
}
if (bit != 0)
{
byte<<=(8-bit);
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
(void) WriteBlobString(image,"% ");
}
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
(void) WriteBlobString(image,"\n%%EndPreview\n");
preview_image=DestroyImage(preview_image);
}
/*
Output Postscript commands.
*/
(void) WriteBlob(image,sizeof(PostscriptProlog)-1,
(const unsigned char *) PostscriptProlog);
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
{
(void) WriteBlobString(image,
" /Times-Roman findfont pointsize scalefont setfont\n");
for (j=(ssize_t) MultilineCensus(value)-1; j >= 0; j--)
{
(void) WriteBlobString(image," /label 512 string def\n");
(void) WriteBlobString(image,
" currentfile label readline pop\n");
(void) FormatLocaleString(buffer,MaxTextExtent,
" 0 y %g add moveto label show pop\n",j*pointsize+12);
(void) WriteBlobString(image,buffer);
}
}
(void) WriteBlob(image,sizeof(PostscriptEpilog)-1,
(const unsigned char *) PostscriptEpilog);
if (LocaleCompare(image_info->magick,"PS") == 0)
(void) WriteBlobString(image," showpage\n");
(void) WriteBlobString(image,"} bind def\n");
(void) WriteBlobString(image,"%%EndProlog\n");
}
(void) FormatLocaleString(buffer,MaxTextExtent,"%%%%Page: 1 %.20g\n",
(double) (page++));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%PageBoundingBox: %.20g %.20g %.20g %.20g\n",(double) geometry.x,
(double) geometry.y,geometry.x+(double) geometry.width,geometry.y+(double)
(geometry.height+text_size));
(void) WriteBlobString(image,buffer);
if ((double) geometry.x < bounds.x1)
bounds.x1=(double) geometry.x;
if ((double) geometry.y < bounds.y1)
bounds.y1=(double) geometry.y;
if ((double) (geometry.x+geometry.width-1) > bounds.x2)
bounds.x2=(double) geometry.x+geometry.width-1;
if ((double) (geometry.y+(geometry.height+text_size)-1) > bounds.y2)
bounds.y2=(double) geometry.y+(geometry.height+text_size)-1;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
(void) WriteBlobString(image,"%%%%PageResources: font Times-Roman\n");
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"userdict begin\n");
(void) WriteBlobString(image,"DisplayImage\n");
/*
Output image data.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n%g %g\n%g\n",
(double) geometry.x,(double) geometry.y,scale.x,scale.y,pointsize);
(void) WriteBlobString(image,buffer);
labels=(char **) NULL;
value=GetImageProperty(image,"label");
if (value != (const char *) NULL)
labels=StringToList(value);
if (labels != (char **) NULL)
{
for (i=0; labels[i] != (char *) NULL; i++)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%s \n",
labels[i]);
(void) WriteBlobString(image,buffer);
labels[i]=DestroyString(labels[i]);
}
labels=(char **) RelinquishMagickMemory(labels);
}
(void) memset(&pixel,0,sizeof(pixel));
pixel.opacity=(Quantum) TransparentOpacity;
index=(IndexPacket) 0;
x=0;
if ((image_info->type != TrueColorType) &&
(SetImageGray(image,&image->exception) != MagickFalse))
{
if (SetImageMonochrome(image,&image->exception) == MagickFalse)
{
Quantum
pixel;
/*
Dump image as grayscale.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n1\n1\n1\n8\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(Quantum) ScaleQuantumToChar(ClampToQuantum(
GetPixelLuma(image,p)));
q=PopHexPixel(hex_digits,(size_t) pixel,q);
if ((q-pixels+8) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
}
else
{
ssize_t
y;
Quantum
pixel;
/*
Dump image as bitmap.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n1\n1\n1\n1\n",(double) image->columns,(double)
image->rows);
(void) WriteBlobString(image,buffer);
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
pixel=ClampToQuantum(GetPixelLuma(image,p));
if (pixel >= (Quantum) (QuantumRange/2))
byte|=0x01;
bit++;
if (bit == 8)
{
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+2) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
bit=0;
byte=0;
}
p++;
}
if (bit != 0)
{
byte<<=(8-bit);
q=PopHexPixel(hex_digits,byte,q);
if ((q-pixels+2) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
}
}
else
if ((image->storage_class == DirectClass) ||
(image->colors > 256) || (image->matte != MagickFalse))
{
/*
Dump DirectClass image.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g %.20g\n0\n%d\n",
(double) image->columns,(double) image->rows,
compression == RLECompression ? 1 : 0);
(void) WriteBlobString(image,buffer);
switch (compression)
{
case RLECompression:
{
/*
Dump runlength-encoded DirectColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
pixel=(*p);
length=255;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((GetPixelRed(p) == pixel.red) &&
(GetPixelGreen(p) == pixel.green) &&
(GetPixelBlue(p) == pixel.blue) &&
(GetPixelOpacity(p) == pixel.opacity) &&
(length < 255) && (x < (ssize_t) (image->columns-1)))
length++;
else
{
if (x > 0)
{
WriteRunlengthPacket(image,pixel,length,p);
if ((q-pixels+10) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
length=0;
}
pixel=(*p);
p++;
}
WriteRunlengthPacket(image,pixel,length,p);
if ((q-pixels+10) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case NoCompression:
default:
{
/*
Dump uncompressed DirectColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((image->matte != MagickFalse) &&
(GetPixelOpacity(p) == (Quantum) TransparentOpacity))
{
q=PopHexPixel(hex_digits,0xff,q);
q=PopHexPixel(hex_digits,0xff,q);
q=PopHexPixel(hex_digits,0xff,q);
}
else
{
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelRed(p)),q);
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelGreen(p)),q);
q=PopHexPixel(hex_digits,ScaleQuantumToChar(
GetPixelBlue(p)),q);
}
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
}
(void) WriteBlobByte(image,'\n');
}
else
{
/*
Dump PseudoClass image.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,
"%.20g %.20g\n%d\n%d\n0\n",(double) image->columns,(double)
image->rows,image->storage_class == PseudoClass ? 1 : 0,
compression == RLECompression ? 1 : 0);
(void) WriteBlobString(image,buffer);
/*
Dump number of colors and colormap.
*/
(void) FormatLocaleString(buffer,MaxTextExtent,"%.20g\n",(double)
image->colors);
(void) WriteBlobString(image,buffer);
for (i=0; i < (ssize_t) image->colors; i++)
{
(void) FormatLocaleString(buffer,MaxTextExtent,"%02X%02X%02X\n",
ScaleQuantumToChar(image->colormap[i].red),
ScaleQuantumToChar(image->colormap[i].green),
ScaleQuantumToChar(image->colormap[i].blue));
(void) WriteBlobString(image,buffer);
}
switch (compression)
{
case RLECompression:
{
/*
Dump runlength-encoded PseudoColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
index=GetPixelIndex(indexes);
length=255;
for (x=0; x < (ssize_t) image->columns; x++)
{
if ((index == GetPixelIndex(indexes+x)) &&
(length < 255) && (x < ((ssize_t) image->columns-1)))
length++;
else
{
if (x > 0)
{
q=PopHexPixel(hex_digits,(size_t) index,q);
q=PopHexPixel(hex_digits,(size_t)
MagickMin(length,0xff),q);
i++;
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
}
length=0;
}
index=GetPixelIndex(indexes+x);
pixel.red=GetPixelRed(p);
pixel.green=GetPixelGreen(p);
pixel.blue=GetPixelBlue(p);
pixel.opacity=GetPixelOpacity(p);
p++;
}
q=PopHexPixel(hex_digits,(size_t) index,q);
q=PopHexPixel(hex_digits,(size_t) MagickMin(length,0xff),q);
if ((q-pixels+6) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
case NoCompression:
default:
{
/*
Dump uncompressed PseudoColor packets.
*/
q=pixels;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,
&image->exception);
if (p == (const PixelPacket *) NULL)
break;
indexes=GetVirtualIndexQueue(image);
for (x=0; x < (ssize_t) image->columns; x++)
{
q=PopHexPixel(hex_digits,(size_t) GetPixelIndex(
indexes+x),q);
if ((q-pixels+4) >= 80)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
q=pixels;
}
p++;
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,
(MagickOffsetType) y,image->rows);
if (status == MagickFalse)
break;
}
}
if (q != pixels)
{
*q++='\n';
(void) WriteBlob(image,q-pixels,pixels);
}
break;
}
}
(void) WriteBlobByte(image,'\n');
}
if (LocaleCompare(image_info->magick,"PS") != 0)
(void) WriteBlobString(image,"end\n");
(void) WriteBlobString(image,"%%PageTrailer\n");
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) WriteBlobString(image,"%%Trailer\n");
if (page > 2)
{
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%BoundingBox: %.20g %.20g %.20g %.20g\n",ceil(bounds.x1-0.5),
ceil(bounds.y1-0.5),floor(bounds.x2+0.5),floor(bounds.y2+0.5));
(void) WriteBlobString(image,buffer);
(void) FormatLocaleString(buffer,MaxTextExtent,
"%%%%HiResBoundingBox: %g %g %g %g\n",bounds.x1,bounds.y1,
bounds.x2,bounds.y2);
(void) WriteBlobString(image,buffer);
}
(void) WriteBlobString(image,"%%EOF\n");
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_910_0 |
crossvul-cpp_data_bad_3486_10 | /*
* cp1emu.c: a MIPS coprocessor 1 (fpu) instruction emulator
*
* MIPS floating point support
* Copyright (C) 1994-2000 Algorithmics Ltd.
*
* Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000 MIPS Technologies, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* A complete emulator for MIPS coprocessor 1 instructions. This is
* required for #float(switch) or #float(trap), where it catches all
* COP1 instructions via the "CoProcessor Unusable" exception.
*
* More surprisingly it is also required for #float(ieee), to help out
* the hardware fpu at the boundaries of the IEEE-754 representation
* (denormalised values, infinities, underflow, etc). It is made
* quite nasty because emulation of some non-COP1 instructions is
* required, e.g. in branch delay slots.
*
* Note if you know that you won't have an fpu, then you'll get much
* better performance by compiling with -msoft-float!
*/
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/inst.h>
#include <asm/bootinfo.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/signal.h>
#include <asm/mipsregs.h>
#include <asm/fpu_emulator.h>
#include <asm/uaccess.h>
#include <asm/branch.h>
#include "ieee754.h"
/* Strap kernel emulator for full MIPS IV emulation */
#ifdef __mips
#undef __mips
#endif
#define __mips 4
/* Function which emulates a floating point instruction. */
static int fpu_emu(struct pt_regs *, struct mips_fpu_struct *,
mips_instruction);
#if __mips >= 4 && __mips != 32
static int fpux_emu(struct pt_regs *,
struct mips_fpu_struct *, mips_instruction, void *__user *);
#endif
/* Further private data for which no space exists in mips_fpu_struct */
#ifdef CONFIG_DEBUG_FS
DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#endif
/* Control registers */
#define FPCREG_RID 0 /* $0 = revision id */
#define FPCREG_CSR 31 /* $31 = csr */
/* Determine rounding mode from the RM bits of the FCSR */
#define modeindex(v) ((v) & FPU_CSR_RM)
/* Convert Mips rounding mode (0..3) to IEEE library modes. */
static const unsigned char ieee_rm[4] = {
[FPU_CSR_RN] = IEEE754_RN,
[FPU_CSR_RZ] = IEEE754_RZ,
[FPU_CSR_RU] = IEEE754_RU,
[FPU_CSR_RD] = IEEE754_RD,
};
/* Convert IEEE library modes to Mips rounding mode (0..3). */
static const unsigned char mips_rm[4] = {
[IEEE754_RN] = FPU_CSR_RN,
[IEEE754_RZ] = FPU_CSR_RZ,
[IEEE754_RD] = FPU_CSR_RD,
[IEEE754_RU] = FPU_CSR_RU,
};
#if __mips >= 4
/* convert condition code register number to csr bit */
static const unsigned int fpucondbit[8] = {
FPU_CSR_COND0,
FPU_CSR_COND1,
FPU_CSR_COND2,
FPU_CSR_COND3,
FPU_CSR_COND4,
FPU_CSR_COND5,
FPU_CSR_COND6,
FPU_CSR_COND7
};
#endif
/*
* Redundant with logic already in kernel/branch.c,
* embedded in compute_return_epc. At some point,
* a single subroutine should be used across both
* modules.
*/
static int isBranchInstr(mips_instruction * i)
{
switch (MIPSInst_OPCODE(*i)) {
case spec_op:
switch (MIPSInst_FUNC(*i)) {
case jalr_op:
case jr_op:
return 1;
}
break;
case bcond_op:
switch (MIPSInst_RT(*i)) {
case bltz_op:
case bgez_op:
case bltzl_op:
case bgezl_op:
case bltzal_op:
case bgezal_op:
case bltzall_op:
case bgezall_op:
return 1;
}
break;
case j_op:
case jal_op:
case jalx_op:
case beq_op:
case bne_op:
case blez_op:
case bgtz_op:
case beql_op:
case bnel_op:
case blezl_op:
case bgtzl_op:
return 1;
case cop0_op:
case cop1_op:
case cop2_op:
case cop1x_op:
if (MIPSInst_RS(*i) == bc_op)
return 1;
break;
}
return 0;
}
/*
* In the Linux kernel, we support selection of FPR format on the
* basis of the Status.FR bit. If an FPU is not present, the FR bit
* is hardwired to zero, which would imply a 32-bit FPU even for
* 64-bit CPUs. For 64-bit kernels with no FPU we use TIF_32BIT_REGS
* as a proxy for the FR bit so that a 64-bit FPU is emulated. In any
* case, for a 32-bit kernel which uses the O32 MIPS ABI, only the
* even FPRs are used (Status.FR = 0).
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
if (cpu_has_fpu)
return xcp->cp0_status & ST0_FR;
#ifdef CONFIG_64BIT
return !test_thread_flag(TIF_32BIT_REGS);
#else
return 0;
#endif
}
#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
(int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
cop1_64bit(xcp) || !(x & 1) ? \
ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x) SITOREG((sp).bits, x)
#define DPFROMREG(dp, x) DIFROMREG((dp).bits, x)
#define DPTOREG(dp, x) DITOREG((dp).bits, x)
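/*
 * Worked example of the pairing encoded by the macros above (read off
 * SIFROMREG/SITOREG, not additional logic): with a 32-bit register file
 * (cop1_64bit(xcp) == 0), SITOREG(val, 1) stores val into the upper 32
 * bits of ctx->fpr[0] and SIFROMREG(val, 1) reads it back from there,
 * i.e. the odd single $f1 lives in the top half of the even 64-bit
 * register that backs the $f0/$f1 pair.
 */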
/*
* Emulate the single floating point instruction pointed at by EPC.
* Two instructions if the instruction is in a branch delay slot.
*/
static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
void *__user *fault_addr)
{
mips_instruction ir;
unsigned long emulpc, contpc;
unsigned int cond;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
/* XXX NEC Vr54xx bug workaround */
if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir))
xcp->cp0_cause &= ~CAUSEF_BD;
if (xcp->cp0_cause & CAUSEF_BD) {
/*
* The instruction to be emulated is in a branch delay slot
* which means that we have to emulate the branch instruction
* BEFORE we do the cop1 instruction.
*
* This branch could be a COP1 branch, but in that case we
* would have had a trap for that instruction, and would not
* come through this route.
*
* Linux MIPS branch emulator operates on context, updating the
* cp0_epc.
*/
emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */
if (__compute_return_epc(xcp)) {
#ifdef CP1DBG
printk("failed to emulate branch at %p\n",
(void *) (xcp->cp0_epc));
#endif
return SIGILL;
}
if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGBUS;
}
if (__get_user(ir, (mips_instruction __user *) emulpc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)emulpc;
return SIGSEGV;
}
/* __compute_return_epc() will have updated cp0_epc */
contpc = xcp->cp0_epc;
/* In order not to confuse ptrace() et al, tweak context */
xcp->cp0_epc = emulpc - 4;
} else {
emulpc = xcp->cp0_epc;
contpc = xcp->cp0_epc + 4;
}
emul:
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, xcp, 0);
MIPS_FPU_EMU_INC_STATS(emulated);
switch (MIPSInst_OPCODE(ir)) {
case ldc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_RT(ir));
break;
}
case sdc1_op:{
u64 __user *va = (u64 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u64 val;
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case lwc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_RT(ir));
break;
}
case swc1_op:{
u32 __user *va = (u32 __user *) (xcp->regs[MIPSInst_RS(ir)] +
MIPSInst_SIMM(ir));
u32 val;
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
}
case cop1_op:
switch (MIPSInst_RS(ir)) {
#if defined(__mips64)
case dmfc_op:
/* copregister fs -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case dmtc_op:
/* copregister fs <- rt */
DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
#endif
case mfc_op:
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
MIPSInst_RD(ir));
}
break;
case mtc_op:
/* copregister rd <- rt */
SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
case cfc_op:{
/* cop control register rd -> gpr[rt] */
u32 value;
if (MIPSInst_RD(ir) == FPCREG_CSR) {
value = ctx->fcr31;
value = (value & ~FPU_CSR_RM) |
mips_rm[modeindex(value)];
#ifdef CSRTRACE
printk("%p gpr[%d]<-csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
}
else if (MIPSInst_RD(ir) == FPCREG_RID)
value = 0;
else
value = 0;
if (MIPSInst_RT(ir))
xcp->regs[MIPSInst_RT(ir)] = value;
break;
}
case ctc_op:{
/* copregister rd <- rt */
u32 value;
if (MIPSInst_RT(ir) == 0)
value = 0;
else
value = xcp->regs[MIPSInst_RT(ir)];
/* we only have one writable control reg
*/
if (MIPSInst_RD(ir) == FPCREG_CSR) {
#ifdef CSRTRACE
printk("%p gpr[%d]->csr=%08x\n",
(void *) (xcp->cp0_epc),
MIPSInst_RT(ir), value);
#endif
/*
* Don't write reserved bits,
* and convert to ieee library modes
*/
ctx->fcr31 = (value &
~(FPU_CSR_RSVD | FPU_CSR_RM)) |
ieee_rm[modeindex(value)];
}
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
return SIGFPE;
}
break;
}
case bc_op:{
int likely = 0;
if (xcp->cp0_cause & CAUSEF_BD)
return SIGILL;
#if __mips >= 4
cond = ctx->fcr31 & fpucondbit[MIPSInst_RT(ir) >> 2];
#else
cond = ctx->fcr31 & FPU_CSR_COND;
#endif
switch (MIPSInst_RT(ir) & 3) {
case bcfl_op:
likely = 1;
case bcf_op:
cond = !cond;
break;
case bctl_op:
likely = 1;
case bct_op:
break;
default:
/* that's an illegal instruction */
return SIGILL;
}
xcp->cp0_cause |= CAUSEF_BD;
if (cond) {
/* branch taken: emulate dslot
* instruction
*/
xcp->cp0_epc += 4;
contpc = (xcp->cp0_epc +
(MIPSInst_SIMM(ir) << 2));
if (!access_ok(VERIFY_READ, xcp->cp0_epc,
sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(ir,
(mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
switch (MIPSInst_OPCODE(ir)) {
case lwc1_op:
case swc1_op:
#if (__mips >= 2 || defined(__mips64))
case ldc1_op:
case sdc1_op:
#endif
case cop1_op:
#if __mips >= 4 && __mips != 32
case cop1x_op:
#endif
/* it's one of ours */
goto emul;
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) == movc_op)
goto emul;
break;
#endif
}
/*
* Single step the non-cp1
* instruction in the dslot
*/
return mips_dsemul(xcp, ir, contpc);
}
else {
/* branch not taken */
if (likely) {
/*
* branch likely nullifies
* dslot if not taken
*/
xcp->cp0_epc += 4;
contpc += 4;
/*
* else continue & execute
* dslot as normal insn
*/
}
}
break;
}
default:
if (!(MIPSInst_RS(ir) & 0x10))
return SIGILL;
{
int sig;
/* a real fpu computation instruction */
if ((sig = fpu_emu(xcp, ctx, ir)))
return sig;
}
}
break;
#if __mips >= 4 && __mips != 32
case cop1x_op:{
int sig = fpux_emu(xcp, ctx, ir, fault_addr);
if (sig)
return sig;
break;
}
#endif
#if __mips >= 4
case spec_op:
if (MIPSInst_FUNC(ir) != movc_op)
return SIGILL;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) == ((MIPSInst_RT(ir) & 1) != 0))
xcp->regs[MIPSInst_RD(ir)] =
xcp->regs[MIPSInst_RS(ir)];
break;
#endif
default:
return SIGILL;
}
/* we did it !! */
xcp->cp0_epc = contpc;
xcp->cp0_cause &= ~CAUSEF_BD;
return 0;
}
/*
* Conversion table from MIPS compare ops 48-63
* cond = ieee754dp_cmp(x,y,IEEE754_UN,sig);
*/
static const unsigned char cmptab[8] = {
0, /* cmp_0 (sig) cmp_sf */
IEEE754_CUN, /* cmp_un (sig) cmp_ngle */
IEEE754_CEQ, /* cmp_eq (sig) cmp_seq */
IEEE754_CEQ | IEEE754_CUN, /* cmp_ueq (sig) cmp_ngl */
IEEE754_CLT, /* cmp_olt (sig) cmp_lt */
IEEE754_CLT | IEEE754_CUN, /* cmp_ult (sig) cmp_nge */
IEEE754_CLT | IEEE754_CEQ, /* cmp_ole (sig) cmp_le */
IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN, /* cmp_ule (sig) cmp_ngt */
};
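/*
 * Example (read off the table above): c.olt selects cmptab[4] ==
 * IEEE754_CLT, so the comparison is true only when x < y and neither
 * operand is a NaN.  Bit 3 of the opcode (the "sig" column) requests the
 * signalling variant, e.g. cmp_lt, which additionally raises an invalid
 * operation exception on unordered operands.
 */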
#if __mips >= 4 && __mips != 32
/*
* Additional MIPS4 instructions
*/
#define DEF3OP(name, p, f1, f2, f3) \
static ieee754##p fpemu_##p##_##name(ieee754##p r, ieee754##p s, \
ieee754##p t) \
{ \
struct _ieee754_csr ieee754_csr_save; \
s = f1(s, t); \
ieee754_csr_save = ieee754_csr; \
s = f2(s, r); \
ieee754_csr_save.cx |= ieee754_csr.cx; \
ieee754_csr_save.sx |= ieee754_csr.sx; \
s = f3(s); \
ieee754_csr.cx |= ieee754_csr_save.cx; \
ieee754_csr.sx |= ieee754_csr_save.sx; \
return s; \
}
static ieee754dp fpemu_dp_recip(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), d);
}
static ieee754dp fpemu_dp_rsqrt(ieee754dp d)
{
return ieee754dp_div(ieee754dp_one(0), ieee754dp_sqrt(d));
}
static ieee754sp fpemu_sp_recip(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), s);
}
static ieee754sp fpemu_sp_rsqrt(ieee754sp s)
{
return ieee754sp_div(ieee754sp_one(0), ieee754sp_sqrt(s));
}
DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, );
DEF3OP(msub, sp, ieee754sp_mul, ieee754sp_sub, );
DEF3OP(nmadd, sp, ieee754sp_mul, ieee754sp_add, ieee754sp_neg);
DEF3OP(nmsub, sp, ieee754sp_mul, ieee754sp_sub, ieee754sp_neg);
DEF3OP(madd, dp, ieee754dp_mul, ieee754dp_add, );
DEF3OP(msub, dp, ieee754dp_mul, ieee754dp_sub, );
DEF3OP(nmadd, dp, ieee754dp_mul, ieee754dp_add, ieee754dp_neg);
DEF3OP(nmsub, dp, ieee754dp_mul, ieee754dp_sub, ieee754dp_neg);
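/*
 * For instance, DEF3OP(madd, sp, ieee754sp_mul, ieee754sp_add, ) above
 * defines fpemu_sp_madd(r, s, t), which computes (s * t) + r; the empty
 * third operator makes the final "s = f3(s)" a plain assignment.  The
 * csr save/restore dance in the macro body ORs together the cause and
 * sticky exception bits raised by both arithmetic steps.
 */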
static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir, void *__user *fault_addr)
{
unsigned rcsr = 0; /* resulting csr */
MIPS_FPU_EMU_INC_STATS(cp1xops);
switch (MIPSInst_FMA_FFMT(ir)) {
case s_fmt:{ /* 0 */
ieee754sp(*handler) (ieee754sp, ieee754sp, ieee754sp);
ieee754sp fd, fr, fs, ft;
u32 __user *va;
u32 val;
switch (MIPSInst_FUNC(ir)) {
case lwxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
SITOREG(val, MIPSInst_FD(ir));
break;
case swxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
SIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_s_op:
handler = fpemu_sp_madd;
goto scoptop;
case msub_s_op:
handler = fpemu_sp_msub;
goto scoptop;
case nmadd_s_op:
handler = fpemu_sp_nmadd;
goto scoptop;
case nmsub_s_op:
handler = fpemu_sp_nmsub;
goto scoptop;
scoptop:
SPFROMREG(fr, MIPSInst_FR(ir));
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
SPTOREG(fd, MIPSInst_FD(ir));
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",
ctx->fcr31); */
return SIGFPE;
}
break;
default:
return SIGILL;
}
break;
}
case d_fmt:{ /* 1 */
ieee754dp(*handler) (ieee754dp, ieee754dp, ieee754dp);
ieee754dp fd, fr, fs, ft;
u64 __user *va;
u64 val;
switch (MIPSInst_FUNC(ir)) {
case ldxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__get_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
DITOREG(val, MIPSInst_FD(ir));
break;
case sdxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
DIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGBUS;
}
if (__put_user(val, va)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
return SIGSEGV;
}
break;
case madd_d_op:
handler = fpemu_dp_madd;
goto dcoptop;
case msub_d_op:
handler = fpemu_dp_msub;
goto dcoptop;
case nmadd_d_op:
handler = fpemu_dp_nmadd;
goto dcoptop;
case nmsub_d_op:
handler = fpemu_dp_nmsub;
goto dcoptop;
dcoptop:
DPFROMREG(fr, MIPSInst_FR(ir));
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
fd = (*handler) (fr, fs, ft);
DPTOREG(fd, MIPSInst_FD(ir));
goto copcsr;
default:
return SIGILL;
}
break;
}
case 0x7: /* 7 */
if (MIPSInst_FUNC(ir) != pfetch_op) {
return SIGILL;
}
/* ignore prefx operation */
break;
default:
return SIGILL;
}
return 0;
}
#endif
/*
* Emulate a single COP1 arithmetic instruction.
*/
static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
mips_instruction ir)
{
int rfmt; /* resulting format */
unsigned rcsr = 0; /* resulting csr */
unsigned cond;
union {
ieee754dp d;
ieee754sp s;
int w;
#ifdef __mips64
s64 l;
#endif
} rv; /* resulting value */
MIPS_FPU_EMU_INC_STATS(cp1ops);
switch (rfmt = (MIPSInst_FFMT(ir) & 0xf)) {
case s_fmt:{ /* 0 */
union {
ieee754sp(*b) (ieee754sp, ieee754sp);
ieee754sp(*u) (ieee754sp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754sp_add;
goto scopbop;
case fsub_op:
handler.b = ieee754sp_sub;
goto scopbop;
case fmul_op:
handler.b = ieee754sp_mul;
goto scopbop;
case fdiv_op:
handler.b = ieee754sp_div;
goto scopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754sp_sqrt;
goto scopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_sp_rsqrt;
goto scopuop;
case frecip_op:
handler.u = fpemu_sp_recip;
goto scopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
SPFROMREG(rv.s, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754sp_abs;
goto scopuop;
case fneg_op:
handler.u = ieee754sp_neg;
goto scopuop;
case fmov_op:
/* an easy one */
SPFROMREG(rv.s, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
scopbop:
{
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.s = (*handler.b) (fs, ft);
goto copcsr;
}
scopuop:
{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = (*handler.u) (fs);
goto copcsr;
}
copcsr:
if (ieee754_cxtest(IEEE754_INEXACT))
rcsr |= FPU_CSR_INE_X | FPU_CSR_INE_S;
if (ieee754_cxtest(IEEE754_UNDERFLOW))
rcsr |= FPU_CSR_UDF_X | FPU_CSR_UDF_S;
if (ieee754_cxtest(IEEE754_OVERFLOW))
rcsr |= FPU_CSR_OVF_X | FPU_CSR_OVF_S;
if (ieee754_cxtest(IEEE754_ZERO_DIVIDE))
rcsr |= FPU_CSR_DIV_X | FPU_CSR_DIV_S;
if (ieee754_cxtest(IEEE754_INVALID_OPERATION))
rcsr |= FPU_CSR_INV_X | FPU_CSR_INV_S;
break;
/* unary conv ops */
case fcvts_op:
return SIGILL; /* not defined */
case fcvtd_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fsp(fs);
rfmt = d_fmt;
goto copcsr;
}
case fcvtw_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754sp_tint(fs);
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754sp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif /* __mips >= 2 */
#if defined(__mips64)
case fcvtl_op:{
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754sp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754sp fs;
SPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754sp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754sp fs, ft;
SPFROMREG(fs, MIPSInst_FS(ir));
SPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754sp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8) && ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case d_fmt:{
union {
ieee754dp(*b) (ieee754dp, ieee754dp);
ieee754dp(*u) (ieee754dp);
} handler;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
handler.b = ieee754dp_add;
goto dcopbop;
case fsub_op:
handler.b = ieee754dp_sub;
goto dcopbop;
case fmul_op:
handler.b = ieee754dp_mul;
goto dcopbop;
case fdiv_op:
handler.b = ieee754dp_div;
goto dcopbop;
/* unary ops */
#if __mips >= 2 || defined(__mips64)
case fsqrt_op:
handler.u = ieee754dp_sqrt;
goto dcopuop;
#endif
#if __mips >= 4 && __mips != 32
case frsqrt_op:
handler.u = fpemu_dp_rsqrt;
goto dcopuop;
case frecip_op:
handler.u = fpemu_dp_recip;
goto dcopuop;
#endif
#if __mips >= 4
case fmovc_op:
cond = fpucondbit[MIPSInst_FT(ir) >> 2];
if (((ctx->fcr31 & cond) != 0) !=
((MIPSInst_FT(ir) & 1) != 0))
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovz_op:
if (xcp->regs[MIPSInst_FT(ir)] != 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
case fmovn_op:
if (xcp->regs[MIPSInst_FT(ir)] == 0)
return 0;
DPFROMREG(rv.d, MIPSInst_FS(ir));
break;
#endif
case fabs_op:
handler.u = ieee754dp_abs;
goto dcopuop;
case fneg_op:
handler.u = ieee754dp_neg;
goto dcopuop;
case fmov_op:
/* an easy one */
DPFROMREG(rv.d, MIPSInst_FS(ir));
goto copcsr;
/* binary op on handler */
dcopbop:{
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.d = (*handler.b) (fs, ft);
goto copcsr;
}
dcopuop:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.d = (*handler.u) (fs);
goto copcsr;
}
/* unary conv ops */
case fcvts_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fdp(fs);
rfmt = s_fmt;
goto copcsr;
}
case fcvtd_op:
return SIGILL; /* not defined */
case fcvtw_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.w = ieee754dp_tint(fs); /* wrong */
rfmt = w_fmt;
goto copcsr;
}
#if __mips >= 2 || defined(__mips64)
case fround_op:
case ftrunc_op:
case fceil_op:
case ffloor_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.w = ieee754dp_tint(fs);
ieee754_csr.rm = oldrm;
rfmt = w_fmt;
goto copcsr;
}
#endif
#if defined(__mips64)
case fcvtl_op:{
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
rv.l = ieee754dp_tlong(fs);
rfmt = l_fmt;
goto copcsr;
}
case froundl_op:
case ftruncl_op:
case fceill_op:
case ffloorl_op:{
unsigned int oldrm = ieee754_csr.rm;
ieee754dp fs;
DPFROMREG(fs, MIPSInst_FS(ir));
ieee754_csr.rm = ieee_rm[modeindex(MIPSInst_FUNC(ir))];
rv.l = ieee754dp_tlong(fs);
ieee754_csr.rm = oldrm;
rfmt = l_fmt;
goto copcsr;
}
#endif /* defined(__mips64) */
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
ieee754dp fs, ft;
DPFROMREG(fs, MIPSInst_FS(ir));
DPFROMREG(ft, MIPSInst_FT(ir));
rv.w = ieee754dp_cmp(fs, ft,
cmptab[cmpop & 0x7], cmpop & 0x8);
rfmt = -1;
if ((cmpop & 0x8)
&&
ieee754_cxtest
(IEEE754_INVALID_OPERATION))
rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
else
goto copcsr;
}
else {
return SIGILL;
}
break;
}
break;
}
case w_fmt:{
ieee754sp fs;
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert word to single precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fint(fs.bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert word to double precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fint(fs.bits);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#if defined(__mips64)
case l_fmt:{
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
/* convert long to single precision real */
rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
/* convert long to double precision real */
rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
rfmt = d_fmt;
goto copcsr;
default:
return SIGILL;
}
break;
}
#endif
default:
return SIGILL;
}
/*
* Update the fpu CSR register for this operation.
* If an exception is required, generate a tidy SIGFPE exception,
* without updating the result register.
* Note: cause exception bits do not accumulate, they are rewritten
* for each op; only the flag/sticky bits accumulate.
*/
ctx->fcr31 = (ctx->fcr31 & ~FPU_CSR_ALL_X) | rcsr;
if ((ctx->fcr31 >> 5) & ctx->fcr31 & FPU_CSR_ALL_E) {
/*printk ("SIGFPE: fpu csr = %08x\n",ctx->fcr31); */
return SIGFPE;
}
/*
* Now we can safely write the result back to the register file.
*/
switch (rfmt) {
case -1:{
#if __mips >= 4
cond = fpucondbit[MIPSInst_FD(ir) >> 2];
#else
cond = FPU_CSR_COND;
#endif
if (rv.w)
ctx->fcr31 |= cond;
else
ctx->fcr31 &= ~cond;
break;
}
case d_fmt:
DPTOREG(rv.d, MIPSInst_FD(ir));
break;
case s_fmt:
SPTOREG(rv.s, MIPSInst_FD(ir));
break;
case w_fmt:
SITOREG(rv.w, MIPSInst_FD(ir));
break;
#if defined(__mips64)
case l_fmt:
DITOREG(rv.l, MIPSInst_FD(ir));
break;
#endif
default:
return SIGILL;
}
return 0;
}
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
unsigned long oldepc, prevepc;
mips_instruction insn;
int sig = 0;
oldepc = xcp->cp0_epc;
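/*
* Emulate instructions one at a time, stopping when a real
* FPU can take over (has_fpu), when a signal must be
* delivered, or when the EPC stops advancing.
*/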
do {
prevepc = xcp->cp0_epc;
if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGBUS;
}
if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = (mips_instruction __user *)xcp->cp0_epc;
return SIGSEGV;
}
if (insn == 0)
xcp->cp0_epc += 4; /* skip nops */
else {
/*
* The 'ieee754_csr' is an alias of ctx->fcr31, so there
* is no need to copy ctx->fcr31 into ieee754_csr. Note
* that ieee754_csr.rm holds the IEEE library rounding
* modes, not the MIPS rounding modes.
*/
/* convert to ieee library modes */
ieee754_csr.rm = ieee_rm[ieee754_csr.rm];
sig = cop1Emulate(xcp, ctx, fault_addr);
/* revert to mips rounding mode */
ieee754_csr.rm = mips_rm[ieee754_csr.rm];
}
if (has_fpu)
break;
if (sig)
break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
/* SIGILL indicates a non-fpu instruction */
if (sig == SIGILL && xcp->cp0_epc != oldepc)
/* but if epc has advanced, then ignore it */
sig = 0;
return sig;
}
#ifdef CONFIG_DEBUG_FS
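/*
* The emulator statistics live in per-cpu counters. Each
* debugfs file stores the offset of its counter within
* struct mips_fpu_emulator_stats as its private data; the
* read handler sums that counter over all online CPUs.
*/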
static int fpuemu_stat_get(void *data, u64 *val)
{
int cpu;
unsigned long sum = 0;
for_each_online_cpu(cpu) {
struct mips_fpu_emulator_stats *ps;
local_t *pv;
ps = &per_cpu(fpuemustats, cpu);
pv = (void *)ps + (unsigned long)data;
sum += local_read(pv);
}
*val = sum;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_fpuemu_stat, fpuemu_stat_get, NULL, "%llu\n");
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_fpuemu(void)
{
struct dentry *d, *dir;
if (!mips_debugfs_dir)
return -ENODEV;
dir = debugfs_create_dir("fpuemustats", mips_debugfs_dir);
if (!dir)
return -ENOMEM;
#define FPU_STAT_CREATE(M) \
do { \
d = debugfs_create_file(#M , S_IRUGO, dir, \
(void *)offsetof(struct mips_fpu_emulator_stats, M), \
&fops_fpuemu_stat); \
if (!d) \
return -ENOMEM; \
} while (0)
FPU_STAT_CREATE(emulated);
FPU_STAT_CREATE(loads);
FPU_STAT_CREATE(stores);
FPU_STAT_CREATE(cp1ops);
FPU_STAT_CREATE(cp1xops);
FPU_STAT_CREATE(errors);
return 0;
}
__initcall(debugfs_fpuemu);
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_10 |
crossvul-cpp_data_good_3575_0 | /*
* Linux NET3: Internet Group Management Protocol [IGMP]
*
* This code implements the IGMP protocol as defined in RFC1112. Later
* revisions of the protocol (IGMPv2 and IGMPv3) are also supported.
*
* If you have trouble with this module, be careful which gcc you used;
* the older version didn't come out right with gcc 2.5.8, and the newer
* one seems to fall over with gcc 2.6.2.
*
* Authors:
* Alan Cox <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
*
* Alan Cox : Added lots of __inline__ to optimise
* the memory usage of all the tiny little
* functions.
* Alan Cox : Dumped the header building experiment.
* Alan Cox : Minor tweaks ready for multicast routing
* and extended IGMP protocol.
* Alan Cox : Removed a load of inline directives. Gcc 2.5.8
* writes utterly bogus code otherwise (sigh)
* fixed IGMP loopback to behave in the manner
* desired by mrouted, fixed the fact it has been
* broken since 1.3.6 and cleaned up a few minor
* points.
*
* Chih-Jen Chang : Tried to revise IGMP to Version 2
* Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
* The enhancements are mainly based on Steve Deering's
* ipmulti-3.5 source code.
* Chih-Jen Chang : Added the igmp_get_mrouter_info and
* Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of
* the mrouted version on that device.
* Chih-Jen Chang : Added the max_resp_time parameter to
* Tsu-Sheng Tsao igmp_heard_query(). Using this parameter
* to identify the multicast router version
* and do what the IGMP version 2 specified.
* Chih-Jen Chang : Added a timer to revert to IGMP V2 router
* Tsu-Sheng Tsao if the specified time expired.
* Alan Cox : Stop IGMP from 0.0.0.0 being accepted.
* Alan Cox : Use GFP_ATOMIC in the right places.
* Christian Daudt : igmp timer wasn't set for local group
* memberships but was being deleted,
* which caused a "del_timer() called
* from %p with timer not initialized\n"
* message (960131).
* Christian Daudt : removed del_timer from
* igmp_timer_expire function (960205).
* Christian Daudt : igmp_heard_report now only calls
* igmp_timer_expire if tm->running is
* true (960216).
* Malcolm Beattie : ttl comparison wrong in igmp_rcv made
* igmp_heard_query never trigger. Expiry
* miscalculation fixed in igmp_heard_query
* and random() made to return unsigned to
* prevent negative expiry times.
* Alexey Kuznetsov: Wrong group leaving behaviour, backport
* fix from pending 2.1.x patches.
* Alan Cox: Forget to enable FDDI support earlier.
* Alexey Kuznetsov: Fixed leaving groups on device down.
* Alexey Kuznetsov: Accordance to igmp-v2-06 draft.
* David L Stevens: IGMPv3 support, with help from
* Vinay Kulkarni
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif
#define IP_MAX_MEMBERSHIPS 20
#define IP_MAX_MSF 10
#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from igmp-v2-06 draft */
#define IGMP_V1_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Router_Present_Timeout (400*HZ)
#define IGMP_Unsolicited_Report_Interval (10*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
#define IGMP_Unsolicited_Report_Count 2
#define IGMP_Initial_Report_Delay (1)
/* IGMP_Initial_Report_Delay is not from the IGMP specs!
* The IGMP specs require reporting membership immediately after
* joining a group, but we delay the first report by a
* small interval. It seems more natural and still does not
* contradict the specs, provided this delay is small enough.
*/
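/* A lower IGMP version is "seen" on an interface if it is forced
* via FORCE_IGMP_VERSION, or if a query from a router of that
* version was heard recently enough (the mr_v1_seen/mr_v2_seen
* timestamps armed in igmp_heard_query()).
*/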
#define IGMP_V1_SEEN(in_dev) \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
((in_dev)->mr_v1_seen && \
time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta);
static void ip_ma_put(struct ip_mc_list *im)
{
if (atomic_dec_and_test(&im->refcnt)) {
in_dev_put(im->interface);
kfree_rcu(im, rcu);
}
}
#define for_each_pmc_rcu(in_dev, pmc) \
for (pmc = rcu_dereference(in_dev->mc_list); \
pmc != NULL; \
pmc = rcu_dereference(pmc->next_rcu))
#define for_each_pmc_rtnl(in_dev, pmc) \
for (pmc = rtnl_dereference(in_dev->mc_list); \
pmc != NULL; \
pmc = rtnl_dereference(pmc->next_rcu))
#ifdef CONFIG_IP_MULTICAST
/*
* Timer management
*/
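/* A pending report timer holds a reference on its ip_mc_list:
* the reference is taken when the timer is armed and dropped
* either when del_timer() deactivates it or by the expiry
* handler itself (ip_ma_put() in igmp_timer_expire()).
*/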
static void igmp_stop_timer(struct ip_mc_list *im)
{
spin_lock_bh(&im->lock);
if (del_timer(&im->timer))
atomic_dec(&im->refcnt);
im->tm_running = 0;
im->reporter = 0;
im->unsolicit_count = 0;
spin_unlock_bh(&im->lock);
}
/* Must be called with im->lock held */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
int tv = net_random() % max_delay;
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
atomic_inc(&im->refcnt);
}
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = net_random() % in_dev->mr_maxdelay;
in_dev->mr_gq_running = 1;
if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
in_dev_hold(in_dev);
}
static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
int tv = net_random() % delay;
if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
in_dev_hold(in_dev);
}
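/* Rearm the report timer, but keep the current expiry if it is
* already due to fire sooner than the new maximum delay.
*/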
static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
spin_lock_bh(&im->lock);
im->unsolicit_count = 0;
if (del_timer(&im->timer)) {
if ((long)(im->timer.expires-jiffies) < max_delay) {
add_timer(&im->timer);
im->tm_running = 1;
spin_unlock_bh(&im->lock);
return;
}
atomic_dec(&im->refcnt);
}
igmp_start_timer(im, max_delay);
spin_unlock_bh(&im->lock);
}
/*
* Send an IGMP report.
*/
#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
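/* Decide whether source filter 'psf' should be listed in a v3
* group record of the given type, taking into account whether
* the group (gdeleted) and/or the source (sdeleted) sit on a
* tomb list of deleted entries.
*/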
static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
int gdeleted, int sdeleted)
{
switch (type) {
case IGMPV3_MODE_IS_INCLUDE:
case IGMPV3_MODE_IS_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (!(pmc->gsquery && !psf->sf_gsresp)) {
if (pmc->sfmode == MCAST_INCLUDE)
return 1;
/* don't include if this source is excluded
* in all filters
*/
if (psf->sf_count[MCAST_INCLUDE])
return type == IGMPV3_MODE_IS_INCLUDE;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
}
return 0;
case IGMPV3_CHANGE_TO_INCLUDE:
if (gdeleted || sdeleted)
return 0;
return psf->sf_count[MCAST_INCLUDE] != 0;
case IGMPV3_CHANGE_TO_EXCLUDE:
if (gdeleted || sdeleted)
return 0;
if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
psf->sf_count[MCAST_INCLUDE])
return 0;
return pmc->sfcount[MCAST_EXCLUDE] ==
psf->sf_count[MCAST_EXCLUDE];
case IGMPV3_ALLOW_NEW_SOURCES:
if (gdeleted || !psf->sf_crcount)
return 0;
return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
case IGMPV3_BLOCK_OLD_SOURCES:
if (pmc->sfmode == MCAST_INCLUDE)
return gdeleted || (psf->sf_crcount && sdeleted);
return psf->sf_crcount && !gdeleted && !sdeleted;
}
return 0;
}
static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
struct ip_sf_list *psf;
int scount = 0;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (!is_in(pmc, psf, type, gdeleted, sdeleted))
continue;
scount++;
}
return scount;
}
#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
struct sk_buff *skb;
struct rtable *rt;
struct iphdr *pip;
struct igmpv3_report *pig;
struct net *net = dev_net(dev);
struct flowi4 fl4;
int hlen = LL_RESERVED_SPACE(dev);
int tlen = dev->needed_tailroom;
while (1) {
skb = alloc_skb(size + hlen + tlen,
GFP_ATOMIC | __GFP_NOWARN);
if (skb)
break;
size >>= 1;
if (size < 256)
return NULL;
}
igmp_skb_size(skb) = size;
rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
0, 0,
IPPROTO_IGMP, 0, dev->ifindex);
if (IS_ERR(rt)) {
kfree_skb(skb);
return NULL;
}
skb_dst_set(skb, &rt->dst);
skb->dev = dev;
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
pip = ip_hdr(skb);
skb_put(skb, sizeof(struct iphdr) + 4);
pip->version = 4;
pip->ihl = (sizeof(struct iphdr)+4)>>2;
pip->tos = 0xc0;
pip->frag_off = htons(IP_DF);
pip->ttl = 1;
pip->daddr = fl4.daddr;
pip->saddr = fl4.saddr;
pip->protocol = IPPROTO_IGMP;
pip->tot_len = 0; /* filled in later */
ip_select_ident(pip, &rt->dst, NULL);
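/* Router Alert option: type, length 4, value 0 (RFC 2113). */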
((u8*)&pip[1])[0] = IPOPT_RA;
((u8*)&pip[1])[1] = 4;
((u8*)&pip[1])[2] = 0;
((u8*)&pip[1])[3] = 0;
skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
skb_put(skb, sizeof(*pig));
pig = igmpv3_report_hdr(skb);
pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
pig->resv1 = 0;
pig->csum = 0;
pig->resv2 = 0;
pig->ngrec = 0;
return skb;
}
static int igmpv3_sendpack(struct sk_buff *skb)
{
struct igmphdr *pig = igmp_hdr(skb);
const int igmplen = skb->tail - skb->transport_header;
pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
return ip_local_out(skb);
}
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}
static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
int type, struct igmpv3_grec **ppgr)
{
struct net_device *dev = pmc->interface->dev;
struct igmpv3_report *pih;
struct igmpv3_grec *pgr;
if (!skb)
skb = igmpv3_newpack(dev, dev->mtu);
if (!skb)
return NULL;
pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
pgr->grec_type = type;
pgr->grec_auxwords = 0;
pgr->grec_nsrcs = 0;
pgr->grec_mca = pmc->multiaddr;
pih = igmpv3_report_hdr(skb);
pih->ngrec = htons(ntohs(pih->ngrec)+1);
*ppgr = pgr;
return skb;
}
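/* Bytes still available in an skb being filled: packets from
* igmpv3_newpack() (skb->dev set) use the originally requested
* size minus what is already used; otherwise fall back to the
* real tailroom.
*/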
#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
skb_tailroom(skb)) : 0)
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
int type, int gdeleted, int sdeleted)
{
struct net_device *dev = pmc->interface->dev;
struct igmpv3_report *pih;
struct igmpv3_grec *pgr = NULL;
struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
int scount, stotal, first, isquery, truncate;
if (pmc->multiaddr == IGMP_ALL_HOSTS)
return skb;
isquery = type == IGMPV3_MODE_IS_INCLUDE ||
type == IGMPV3_MODE_IS_EXCLUDE;
truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
type == IGMPV3_CHANGE_TO_EXCLUDE;
stotal = scount = 0;
psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
if (!*psf_list)
goto empty_source;
pih = skb ? igmpv3_report_hdr(skb) : NULL;
/* EX and TO_EX get a fresh packet, if needed */
if (truncate) {
if (pih && pih->ngrec &&
AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, dev->mtu);
}
}
first = 1;
psf_prev = NULL;
for (psf=*psf_list; psf; psf=psf_next) {
__be32 *psrc;
psf_next = psf->sf_next;
if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
psf_prev = psf;
continue;
}
/* clear marks on query responses */
if (isquery)
psf->sf_gsresp = 0;
if (AVAILABLE(skb) < sizeof(__be32) +
first*sizeof(struct igmpv3_grec)) {
if (truncate && !first)
break; /* truncate these */
if (pgr)
pgr->grec_nsrcs = htons(scount);
if (skb)
igmpv3_sendpack(skb);
skb = igmpv3_newpack(dev, dev->mtu);
first = 1;
scount = 0;
}
if (first) {
skb = add_grhead(skb, pmc, type, &pgr);
first = 0;
}
if (!skb)
return NULL;
psrc = (__be32 *)skb_put(skb, sizeof(__be32));
*psrc = psf->sf_inaddr;
scount++; stotal++;
if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
psf->sf_crcount--;
if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
*psf_list = psf->sf_next;
kfree(psf);
continue;
}
}
psf_prev = psf;
}
empty_source:
if (!stotal) {
if (type == IGMPV3_ALLOW_NEW_SOURCES ||
type == IGMPV3_BLOCK_OLD_SOURCES)
return skb;
if (pmc->crcount || isquery) {
/* make sure we have room for group header */
if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
igmpv3_sendpack(skb);
skb = NULL; /* add_grhead will get a new one */
}
skb = add_grhead(skb, pmc, type, &pgr);
}
}
if (pgr)
pgr->grec_nsrcs = htons(scount);
if (isquery)
pmc->gsquery = 0; /* clear query state on report */
return skb;
}
static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
struct sk_buff *skb = NULL;
int type;
if (!pmc) {
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (pmc->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
type = IGMPV3_MODE_IS_EXCLUDE;
else
type = IGMPV3_MODE_IS_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
spin_unlock_bh(&pmc->lock);
}
rcu_read_unlock();
} else {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE])
type = IGMPV3_MODE_IS_EXCLUDE;
else
type = IGMPV3_MODE_IS_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
spin_unlock_bh(&pmc->lock);
}
if (!skb)
return 0;
return igmpv3_sendpack(skb);
}
/*
* remove zero-count source records from a source filter list
*/
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
struct ip_sf_list *psf_prev, *psf_next, *psf;
psf_prev = NULL;
for (psf=*ppsf; psf; psf = psf_next) {
psf_next = psf->sf_next;
if (psf->sf_crcount == 0) {
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
*ppsf = psf->sf_next;
kfree(psf);
} else
psf_prev = psf;
}
}
static void igmpv3_send_cr(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
struct sk_buff *skb = NULL;
int type, dtype;
rcu_read_lock();
spin_lock_bh(&in_dev->mc_tomb_lock);
/* deleted MCA's */
pmc_prev = NULL;
for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
pmc_next = pmc->next;
if (pmc->sfmode == MCAST_INCLUDE) {
type = IGMPV3_BLOCK_OLD_SOURCES;
dtype = IGMPV3_BLOCK_OLD_SOURCES;
skb = add_grec(skb, pmc, type, 1, 0);
skb = add_grec(skb, pmc, dtype, 1, 1);
}
if (pmc->crcount) {
if (pmc->sfmode == MCAST_EXCLUDE) {
type = IGMPV3_CHANGE_TO_INCLUDE;
skb = add_grec(skb, pmc, type, 1, 0);
}
pmc->crcount--;
if (pmc->crcount == 0) {
igmpv3_clear_zeros(&pmc->tomb);
igmpv3_clear_zeros(&pmc->sources);
}
}
if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
if (pmc_prev)
pmc_prev->next = pmc_next;
else
in_dev->mc_tomb = pmc_next;
in_dev_put(pmc->interface);
kfree(pmc);
} else
pmc_prev = pmc;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
/* change recs */
for_each_pmc_rcu(in_dev, pmc) {
spin_lock_bh(&pmc->lock);
if (pmc->sfcount[MCAST_EXCLUDE]) {
type = IGMPV3_BLOCK_OLD_SOURCES;
dtype = IGMPV3_ALLOW_NEW_SOURCES;
} else {
type = IGMPV3_ALLOW_NEW_SOURCES;
dtype = IGMPV3_BLOCK_OLD_SOURCES;
}
skb = add_grec(skb, pmc, type, 0, 0);
skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
/* filter mode changes */
if (pmc->crcount) {
if (pmc->sfmode == MCAST_EXCLUDE)
type = IGMPV3_CHANGE_TO_EXCLUDE;
else
type = IGMPV3_CHANGE_TO_INCLUDE;
skb = add_grec(skb, pmc, type, 0, 0);
pmc->crcount--;
}
spin_unlock_bh(&pmc->lock);
}
rcu_read_unlock();
if (!skb)
return;
(void) igmpv3_sendpack(skb);
}
static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
int type)
{
struct sk_buff *skb;
struct iphdr *iph;
struct igmphdr *ih;
struct rtable *rt;
struct net_device *dev = in_dev->dev;
struct net *net = dev_net(dev);
__be32 group = pmc ? pmc->multiaddr : 0;
struct flowi4 fl4;
__be32 dst;
int hlen, tlen;
if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
return igmpv3_send_report(in_dev, pmc);
else if (type == IGMP_HOST_LEAVE_MESSAGE)
dst = IGMP_ALL_ROUTER;
else
dst = group;
rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
0, 0,
IPPROTO_IGMP, 0, dev->ifindex);
if (IS_ERR(rt))
return -1;
hlen = LL_RESERVED_SPACE(dev);
tlen = dev->needed_tailroom;
skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
}
skb_dst_set(skb, &rt->dst);
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
iph = ip_hdr(skb);
skb_put(skb, sizeof(struct iphdr) + 4);
iph->version = 4;
iph->ihl = (sizeof(struct iphdr)+4)>>2;
iph->tos = 0xc0;
iph->frag_off = htons(IP_DF);
iph->ttl = 1;
iph->daddr = dst;
iph->saddr = fl4.saddr;
iph->protocol = IPPROTO_IGMP;
ip_select_ident(iph, &rt->dst, NULL);
((u8*)&iph[1])[0] = IPOPT_RA;
((u8*)&iph[1])[1] = 4;
((u8*)&iph[1])[2] = 0;
((u8*)&iph[1])[3] = 0;
ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
ih->type = type;
ih->code = 0;
ih->csum = 0;
ih->group = group;
ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
return ip_local_out(skb);
}
static void igmp_gq_timer_expire(unsigned long data)
{
struct in_device *in_dev = (struct in_device *)data;
in_dev->mr_gq_running = 0;
igmpv3_send_report(in_dev, NULL);
__in_dev_put(in_dev);
}
static void igmp_ifc_timer_expire(unsigned long data)
{
struct in_device *in_dev = (struct in_device *)data;
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
in_dev->mr_ifc_count--;
igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
}
__in_dev_put(in_dev);
}
static void igmp_ifc_event(struct in_device *in_dev)
{
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
return;
in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
igmp_ifc_start_timer(in_dev, 1);
}
static void igmp_timer_expire(unsigned long data)
{
struct ip_mc_list *im=(struct ip_mc_list *)data;
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
im->tm_running = 0;
if (im->unsolicit_count) {
im->unsolicit_count--;
igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
}
im->reporter = 1;
spin_unlock(&im->lock);
if (IGMP_V1_SEEN(in_dev))
igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
else if (IGMP_V2_SEEN(in_dev))
igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
else
igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
ip_ma_put(im);
}
/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
struct ip_sf_list *psf;
int i, scount;
scount = 0;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (scount == nsrcs)
break;
for (i=0; i<nsrcs; i++) {
/* skip inactive filters */
if (psf->sf_count[MCAST_INCLUDE] ||
pmc->sfcount[MCAST_EXCLUDE] !=
psf->sf_count[MCAST_EXCLUDE])
continue;
if (srcs[i] == psf->sf_inaddr) {
scount++;
break;
}
}
}
pmc->gsquery = 0;
if (scount == nsrcs) /* all sources excluded */
return 0;
return 1;
}
static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
struct ip_sf_list *psf;
int i, scount;
if (pmc->sfmode == MCAST_EXCLUDE)
return igmp_xmarksources(pmc, nsrcs, srcs);
/* mark INCLUDE-mode sources */
scount = 0;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (scount == nsrcs)
break;
for (i=0; i<nsrcs; i++)
if (srcs[i] == psf->sf_inaddr) {
psf->sf_gsresp = 1;
scount++;
break;
}
}
if (!scount) {
pmc->gsquery = 0;
return 0;
}
pmc->gsquery = 1;
return 1;
}
static void igmp_heard_report(struct in_device *in_dev, __be32 group)
{
struct ip_mc_list *im;
/* Timers are only set for non-local groups */
if (group == IGMP_ALL_HOSTS)
return;
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == group) {
igmp_stop_timer(im);
break;
}
}
rcu_read_unlock();
}
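/* Dispatch on query length: an 8-byte query is IGMPv1 (code 0)
* or IGMPv2; anything shorter than 12 bytes is bogus; everything
* else is parsed as an IGMPv3 query.
*/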
static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
int len)
{
struct igmphdr *ih = igmp_hdr(skb);
struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
struct ip_mc_list *im;
__be32 group = ih->group;
int max_delay;
int mark = 0;
if (len == 8) {
if (ih->code == 0) {
/* Alas, an old v1 router is present here. */
max_delay = IGMP_Query_Response_Interval;
in_dev->mr_v1_seen = jiffies +
IGMP_V1_Router_Present_Timeout;
group = 0;
} else {
/* v2 router present */
max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
in_dev->mr_v2_seen = jiffies +
IGMP_V2_Router_Present_Timeout;
}
/* cancel the interface change timer */
in_dev->mr_ifc_count = 0;
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
/* clear deleted report items */
igmpv3_clear_delrec(in_dev);
} else if (len < 12) {
return; /* ignore bogus packet; freed by caller */
} else if (IGMP_V1_SEEN(in_dev)) {
/* This is a v3 query with v1 queriers present */
max_delay = IGMP_Query_Response_Interval;
group = 0;
} else if (IGMP_V2_SEEN(in_dev)) {
/* This is a v3 query with v2 queriers present.
* Interpretation of the max_delay code is problematic here:
* a real v2 host would use ih_code directly, while v3 uses a
* different encoding. We use the v3 encoding as the more
* likely intent in a v3 query.
*/
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
} else { /* v3 */
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
return;
ih3 = igmpv3_query_hdr(skb);
if (ih3->nsrcs) {
if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
+ ntohs(ih3->nsrcs)*sizeof(__be32)))
return;
ih3 = igmpv3_query_hdr(skb);
}
max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
if (!max_delay)
max_delay = 1; /* can't mod w/ 0 */
in_dev->mr_maxdelay = max_delay;
if (ih3->qrv)
in_dev->mr_qrv = ih3->qrv;
if (!group) { /* general query */
if (ih3->nsrcs)
return; /* no sources allowed */
igmp_gq_start_timer(in_dev);
return;
}
/* mark sources to include, if group & source-specific */
mark = ih3->nsrcs != 0;
}
/*
* - Start the timers in all of our membership records
* that the query applies to for the interface on
* which the query arrived, excluding those that belong
* to a "local" group (224.0.0.X).
* - For timers already running, check if they need to
* be reset.
* - Use the igmp->igmp_code field as the maximum
* delay possible.
*/
rcu_read_lock();
for_each_pmc_rcu(in_dev, im) {
int changed;
if (group && group != im->multiaddr)
continue;
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
spin_lock_bh(&im->lock);
if (im->tm_running)
im->gsquery = im->gsquery && mark;
else
im->gsquery = mark;
changed = !im->gsquery ||
igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
spin_unlock_bh(&im->lock);
if (changed)
igmp_mod_timer(im, max_delay);
}
rcu_read_unlock();
}
/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
/* This basically follows the spec line by line -- see RFC1112 */
struct igmphdr *ih;
struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
int len = skb->len;
if (in_dev == NULL)
goto drop;
if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
goto drop;
switch (skb->ip_summed) {
case CHECKSUM_COMPLETE:
if (!csum_fold(skb->csum))
break;
/* fall through */
case CHECKSUM_NONE:
skb->csum = 0;
if (__skb_checksum_complete(skb))
goto drop;
}
ih = igmp_hdr(skb);
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_QUERY:
igmp_heard_query(in_dev, skb, len);
break;
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
/* Is it our report looped back? */
if (rt_is_output_route(skb_rtable(skb)))
break;
/* don't rely on MC router hearing unicast reports */
if (skb->pkt_type == PACKET_MULTICAST ||
skb->pkt_type == PACKET_BROADCAST)
igmp_heard_report(in_dev, ih->group);
break;
case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
return pim_rcv_v1(skb);
#endif
case IGMPV3_HOST_MEMBERSHIP_REPORT:
case IGMP_DVMRP:
case IGMP_TRACE:
case IGMP_HOST_LEAVE_MESSAGE:
case IGMP_MTRACE:
case IGMP_MTRACE_RESP:
break;
default:
break;
}
drop:
kfree_skb(skb);
return 0;
}
#endif
/*
* Add a filter to a device
*/
static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
char buf[MAX_ADDR_LEN];
struct net_device *dev = in_dev->dev;
/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
We will get multicast token leakage, when IFF_MULTICAST
is changed. This check should be done in ndo_set_rx_mode
routine. Something like:
if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
--ANK
*/
if (arp_mc_map(addr, buf, dev, 0) == 0)
dev_mc_add(dev, buf);
}
/*
* Remove a filter from a device
*/
static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
char buf[MAX_ADDR_LEN];
struct net_device *dev = in_dev->dev;
if (arp_mc_map(addr, buf, dev, 0) == 0)
dev_mc_del(dev, buf);
}
#ifdef CONFIG_IP_MULTICAST
/*
* deleted ip_mc_list manipulation
*/
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
struct ip_mc_list *pmc;
/* this is an "ip_mc_list" for convenience; only the fields below
* are actually used. In particular, the refcnt and users are not
* used for management of the delete list. Using the same structure
* for deleted items allows change reports to use common code with
* non-deleted or query-response MCA's.
*/
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
pmc->multiaddr = im->multiaddr;
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
pmc->sfmode = im->sfmode;
if (pmc->sfmode == MCAST_INCLUDE) {
struct ip_sf_list *psf;
pmc->tomb = im->tomb;
pmc->sources = im->sources;
im->tomb = im->sources = NULL;
for (psf=pmc->sources; psf; psf=psf->sf_next)
psf->sf_crcount = pmc->crcount;
}
spin_unlock_bh(&im->lock);
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc->next = in_dev->mc_tomb;
in_dev->mc_tomb = pmc;
spin_unlock_bh(&in_dev->mc_tomb_lock);
}
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
struct ip_mc_list *pmc, *pmc_prev;
struct ip_sf_list *psf, *psf_next;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
if (pmc->multiaddr == multiaddr)
break;
pmc_prev = pmc;
}
if (pmc) {
if (pmc_prev)
pmc_prev->next = pmc->next;
else
in_dev->mc_tomb = pmc->next;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
if (pmc) {
for (psf=pmc->tomb; psf; psf=psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
in_dev_put(pmc->interface);
kfree(pmc);
}
}
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *nextpmc;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc = in_dev->mc_tomb;
in_dev->mc_tomb = NULL;
spin_unlock_bh(&in_dev->mc_tomb_lock);
for (; pmc; pmc = nextpmc) {
nextpmc = pmc->next;
ip_mc_clear_src(pmc);
in_dev_put(pmc->interface);
kfree(pmc);
}
/* clear dead sources, too */
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
struct ip_sf_list *psf, *psf_next;
spin_lock_bh(&pmc->lock);
psf = pmc->tomb;
pmc->tomb = NULL;
spin_unlock_bh(&pmc->lock);
for (; psf; psf=psf_next) {
psf_next = psf->sf_next;
kfree(psf);
}
}
rcu_read_unlock();
}
#endif
static void igmp_group_dropped(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
int reporter;
#endif
if (im->loaded) {
im->loaded = 0;
ip_mc_filter_del(in_dev, im->multiaddr);
}
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
reporter = im->reporter;
igmp_stop_timer(im);
if (!in_dev->dead) {
if (IGMP_V1_SEEN(in_dev))
return;
if (IGMP_V2_SEEN(in_dev)) {
if (reporter)
igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
return;
}
/* IGMPv3 */
igmpv3_add_delrec(in_dev, im);
igmp_ifc_event(in_dev);
}
#endif
}
static void igmp_group_added(struct ip_mc_list *im)
{
struct in_device *in_dev = im->interface;
if (im->loaded == 0) {
im->loaded = 1;
ip_mc_filter_add(in_dev, im->multiaddr);
}
#ifdef CONFIG_IP_MULTICAST
if (im->multiaddr == IGMP_ALL_HOSTS)
return;
if (in_dev->dead)
return;
if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
spin_lock_bh(&im->lock);
igmp_start_timer(im, IGMP_Initial_Report_Delay);
spin_unlock_bh(&im->lock);
return;
}
/* else, v3 */
im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
igmp_ifc_event(in_dev);
#endif
}
/*
* Multicast list managers
*/
/*
* A socket has joined a multicast group on device dev.
*/
void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
struct ip_mc_list *im;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == addr) {
im->users++;
ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
goto out;
}
}
im = kzalloc(sizeof(*im), GFP_KERNEL);
if (!im)
goto out;
im->users = 1;
im->interface = in_dev;
in_dev_hold(in_dev);
im->multiaddr = addr;
/* initial mode is (EX, empty) */
im->sfmode = MCAST_EXCLUDE;
im->sfcount[MCAST_EXCLUDE] = 1;
atomic_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
im->unsolicit_count = IGMP_Unsolicited_Report_Count;
#endif
im->next_rcu = in_dev->mc_list;
in_dev->mc_count++;
RCU_INIT_POINTER(in_dev->mc_list, im);
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
igmp_group_added(im);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
out:
return;
}
EXPORT_SYMBOL(ip_mc_inc_group);
/*
* Resend IGMP JOIN report; used for bonding.
* Called with rcu_read_lock()
*/
void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
/* a failover is happening and switches
* must be notified immediately
*/
if (IGMP_V1_SEEN(in_dev))
type = IGMP_HOST_MEMBERSHIP_REPORT;
else if (IGMP_V2_SEEN(in_dev))
type = IGMPV2_HOST_MEMBERSHIP_REPORT;
else
type = IGMPV3_HOST_MEMBERSHIP_REPORT;
igmp_send_report(in_dev, im, type);
}
#endif
}
EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
*/
void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
{
struct ip_mc_list *i;
struct ip_mc_list __rcu **ip;
ASSERT_RTNL();
for (ip = &in_dev->mc_list;
(i = rtnl_dereference(*ip)) != NULL;
ip = &i->next_rcu) {
if (i->multiaddr == addr) {
if (--i->users == 0) {
*ip = i->next_rcu;
in_dev->mc_count--;
igmp_group_dropped(i);
ip_mc_clear_src(i);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
ip_ma_put(i);
return;
}
break;
}
}
}
EXPORT_SYMBOL(ip_mc_dec_group);
/* Device changing type */
void ip_mc_unmap(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_dropped(pmc);
}
void ip_mc_remap(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_added(pmc);
}
/* Device going down */
void ip_mc_down(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_dropped(pmc);
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_ifc_count = 0;
if (del_timer(&in_dev->mr_ifc_timer))
__in_dev_put(in_dev);
in_dev->mr_gq_running = 0;
if (del_timer(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
igmpv3_clear_delrec(in_dev);
#endif
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
}
void ip_mc_init_dev(struct in_device *in_dev)
{
ASSERT_RTNL();
in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_gq_running = 0;
setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
(unsigned long)in_dev);
in_dev->mr_ifc_count = 0;
in_dev->mc_count = 0;
setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
(unsigned long)in_dev);
in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
#endif
spin_lock_init(&in_dev->mc_tomb_lock);
}
/* Device going up */
void ip_mc_up(struct in_device *in_dev)
{
struct ip_mc_list *pmc;
ASSERT_RTNL();
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for_each_pmc_rtnl(in_dev, pmc)
igmp_group_added(pmc);
}
/*
* Device is about to be destroyed: clean up.
*/
void ip_mc_destroy_dev(struct in_device *in_dev)
{
struct ip_mc_list *i;
ASSERT_RTNL();
/* Deactivate timers */
ip_mc_down(in_dev);
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
/* We've dropped the groups in ip_mc_down already */
ip_mc_clear_src(i);
ip_ma_put(i);
}
}
/* RTNL is locked */
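/* Resolve the device for a membership request: by explicit
* ifindex first, then by local interface address, and finally
* by routing the multicast address itself.
*/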
static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
struct net_device *dev = NULL;
struct in_device *idev = NULL;
if (imr->imr_ifindex) {
idev = inetdev_by_index(net, imr->imr_ifindex);
return idev;
}
if (imr->imr_address.s_addr) {
dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
if (!dev)
return NULL;
}
if (!dev) {
struct rtable *rt = ip_route_output(net,
imr->imr_multiaddr.s_addr,
0, 0, 0);
if (!IS_ERR(rt)) {
dev = rt->dst.dev;
ip_rt_put(rt);
}
}
if (dev) {
imr->imr_ifindex = dev->ifindex;
idev = __in_dev_get_rtnl(dev);
}
return idev;
}
/*
* Join a socket to a group
*/
int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
__be32 *psfsrc)
{
struct ip_sf_list *psf, *psf_prev;
int rv = 0;
psf_prev = NULL;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
}
if (!psf || psf->sf_count[sfmode] == 0) {
/* source filter not found, or count wrong => bug */
return -ESRCH;
}
psf->sf_count[sfmode]--;
if (psf->sf_count[sfmode] == 0) {
ip_rt_multicast_event(pmc->interface);
}
if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
struct in_device *in_dev = pmc->interface;
#endif
/* no more filters for this source */
if (psf_prev)
psf_prev->sf_next = psf->sf_next;
else
pmc->sources = psf->sf_next;
#ifdef CONFIG_IP_MULTICAST
if (psf->sf_oldin &&
!IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
psf->sf_next = pmc->tomb;
pmc->tomb = psf;
rv = 1;
} else
#endif
kfree(psf);
}
return rv;
}
#ifndef CONFIG_IP_MULTICAST
#define igmp_ifc_event(x) do { } while (0)
#endif
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta)
{
struct ip_mc_list *pmc;
int changerec = 0;
int i, err;
if (!in_dev)
return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
#endif
if (!delta) {
err = -EINVAL;
if (!pmc->sfcount[sfmode])
goto out_unlock;
pmc->sfcount[sfmode]--;
}
err = 0;
for (i=0; i<sfcount; i++) {
int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);
changerec |= rv > 0;
if (!err && rv < 0)
err = rv;
}
if (pmc->sfmode == MCAST_EXCLUDE &&
pmc->sfcount[MCAST_EXCLUDE] == 0 &&
pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
#endif
/* filter mode change */
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
for (psf=pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(pmc->interface);
} else if (sf_setstate(pmc) || changerec) {
igmp_ifc_event(pmc->interface);
#endif
}
out_unlock:
spin_unlock_bh(&pmc->lock);
return err;
}
/*
* Add multicast single-source filter to the interface list
*/
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
__be32 *psfsrc)
{
struct ip_sf_list *psf, *psf_prev;
psf_prev = NULL;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (psf->sf_inaddr == *psfsrc)
break;
psf_prev = psf;
}
if (!psf) {
psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
if (!psf)
return -ENOBUFS;
psf->sf_inaddr = *psfsrc;
if (psf_prev) {
psf_prev->sf_next = psf;
} else
pmc->sources = psf;
}
psf->sf_count[sfmode]++;
if (psf->sf_count[sfmode] == 1) {
ip_rt_multicast_event(pmc->interface);
}
return 0;
}
#ifdef CONFIG_IP_MULTICAST
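/* sf_markstate() snapshots, per source, whether it is currently
* "active" (would appear in reports) under the present filter
* mode; sf_setstate() then compares against that snapshot after
* a change and arms change-report counters (sf_crcount) for
* sources whose state flipped.
*/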
static void sf_markstate(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf;
int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
for (psf=pmc->sources; psf; psf=psf->sf_next)
if (pmc->sfcount[MCAST_EXCLUDE]) {
psf->sf_oldin = mca_xcount ==
psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
} else
psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}
static int sf_setstate(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf, *dpsf;
int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
int qrv = pmc->interface->mr_qrv;
int new_in, rv;
rv = 0;
for (psf=pmc->sources; psf; psf=psf->sf_next) {
if (pmc->sfcount[MCAST_EXCLUDE]) {
new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
!psf->sf_count[MCAST_INCLUDE];
} else
new_in = psf->sf_count[MCAST_INCLUDE] != 0;
if (new_in) {
if (!psf->sf_oldin) {
struct ip_sf_list *prev = NULL;
for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next) {
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
prev = dpsf;
}
if (dpsf) {
if (prev)
prev->sf_next = dpsf->sf_next;
else
pmc->tomb = dpsf->sf_next;
kfree(dpsf);
}
psf->sf_crcount = qrv;
rv++;
}
} else if (psf->sf_oldin) {
psf->sf_crcount = 0;
/*
* add or update "delete" records if an active filter
* is now inactive
*/
for (dpsf=pmc->tomb; dpsf; dpsf=dpsf->sf_next)
if (dpsf->sf_inaddr == psf->sf_inaddr)
break;
if (!dpsf) {
dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
if (!dpsf)
continue;
*dpsf = *psf;
/* pmc->lock held by callers */
dpsf->sf_next = pmc->tomb;
pmc->tomb = dpsf;
}
dpsf->sf_crcount = qrv;
rv++;
}
}
return rv;
}
#endif
/*
* Add multicast source filter list to the interface list
*/
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
int sfcount, __be32 *psfsrc, int delta)
{
struct ip_mc_list *pmc;
int isexclude;
int i, err;
if (!in_dev)
return -ENODEV;
rcu_read_lock();
for_each_pmc_rcu(in_dev, pmc) {
if (*pmca == pmc->multiaddr)
break;
}
if (!pmc) {
/* MCA not found?? bug */
rcu_read_unlock();
return -ESRCH;
}
spin_lock_bh(&pmc->lock);
rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
sf_markstate(pmc);
#endif
isexclude = pmc->sfmode == MCAST_EXCLUDE;
if (!delta)
pmc->sfcount[sfmode]++;
err = 0;
for (i=0; i<sfcount; i++) {
err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
if (err)
break;
}
if (err) {
int j;
if (!delta)
pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
struct ip_sf_list *psf;
in_dev = pmc->interface;
#endif
/* filter mode change */
if (pmc->sfcount[MCAST_EXCLUDE])
pmc->sfmode = MCAST_EXCLUDE;
else if (pmc->sfcount[MCAST_INCLUDE])
pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
/* else no filters; keep old mode for reports */
pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
IGMP_Unsolicited_Report_Count;
in_dev->mr_ifc_count = pmc->crcount;
for (psf=pmc->sources; psf; psf = psf->sf_next)
psf->sf_crcount = 0;
igmp_ifc_event(in_dev);
} else if (sf_setstate(pmc)) {
igmp_ifc_event(in_dev);
#endif
}
spin_unlock_bh(&pmc->lock);
return err;
}
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
struct ip_sf_list *psf, *nextpsf;
for (psf=pmc->tomb; psf; psf=nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->tomb = NULL;
for (psf=pmc->sources; psf; psf=nextpsf) {
nextpsf = psf->sf_next;
kfree(psf);
}
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
}
/*
* Join a multicast group
*/
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
int err;
__be32 addr = imr->imr_multiaddr.s_addr;
struct ip_mc_socklist *iml = NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
int ifindex;
int count = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
iml = NULL;
err = -ENODEV;
goto done;
}
err = -EADDRINUSE;
ifindex = imr->imr_ifindex;
for_each_pmc_rtnl(inet, i) {
if (i->multi.imr_multiaddr.s_addr == addr &&
i->multi.imr_ifindex == ifindex)
goto done;
count++;
}
err = -ENOBUFS;
if (count >= sysctl_igmp_max_memberships)
goto done;
iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
if (iml == NULL)
goto done;
memcpy(&iml->multi, imr, sizeof(*imr));
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
iml->sfmode = MCAST_EXCLUDE;
RCU_INIT_POINTER(inet->mc_list, iml);
ip_mc_inc_group(in_dev, addr);
err = 0;
done:
rtnl_unlock();
return err;
}
EXPORT_SYMBOL(ip_mc_join_group);
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
int err;
if (psf == NULL) {
/* any-source empty exclude case */
return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, 0, NULL, 0);
}
err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
iml->sfmode, psf->sl_count, psf->sl_addr, 0);
RCU_INIT_POINTER(iml->sflist, NULL);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psf, rcu);
return err;
}
/*
* Ask a socket to leave a group.
*/
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
struct ip_mc_socklist __rcu **imlp;
struct in_device *in_dev;
struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
int ret = -EADDRNOTAVAIL;
rtnl_lock();
in_dev = ip_mc_find_dev(net, imr);
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list;
(iml = rtnl_dereference(*imlp)) != NULL;
imlp = &iml->next_rcu) {
if (iml->multi.imr_multiaddr.s_addr != group)
continue;
if (ifindex) {
if (iml->multi.imr_ifindex != ifindex)
continue;
} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
iml->multi.imr_address.s_addr)
continue;
(void) ip_mc_leave_src(sk, iml, in_dev);
*imlp = iml->next_rcu;
if (in_dev)
ip_mc_dec_group(in_dev, group);
rtnl_unlock();
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
return 0;
}
if (!in_dev)
ret = -ENODEV;
rtnl_unlock();
return ret;
}
int ip_mc_source(int add, int omode, struct sock *sk, struct
ip_mreq_source *mreqs, int ifindex)
{
int err;
struct ip_mreqn imr;
__be32 addr = mreqs->imr_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev = NULL;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
int i, j, rv;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if ((pmc->multi.imr_multiaddr.s_addr ==
imr.imr_multiaddr.s_addr) &&
(pmc->multi.imr_ifindex == imr.imr_ifindex))
break;
}
if (!pmc) { /* must have a prior join */
err = -EINVAL;
goto done;
}
/* if a source filter was set, must be the same mode as before */
if (pmc->sflist) {
if (pmc->sfmode != omode) {
err = -EINVAL;
goto done;
}
} else if (pmc->sfmode != omode) {
/* allow mode switches for empty-set filters */
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
NULL, 0);
pmc->sfmode = omode;
}
psl = rtnl_dereference(pmc->sflist);
if (!add) {
if (!psl)
goto done; /* err = -EADDRNOTAVAIL */
rv = !0;
for (i=0; i<psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
break;
}
if (rv) /* source not found */
goto done; /* err = -EADDRNOTAVAIL */
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
leavegroup = 1;
goto done;
}
/* update the interface filter */
ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
for (j=i+1; j<psl->sl_count; j++)
psl->sl_addr[j-1] = psl->sl_addr[j];
psl->sl_count--;
err = 0;
goto done;
}
/* else, add a new source to the filter */
if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
err = -ENOBUFS;
goto done;
}
if (!psl || psl->sl_count == psl->sl_max) {
struct ip_sf_socklist *newpsl;
int count = IP_SFBLOCK;
if (psl)
count += psl->sl_max;
newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
}
newpsl->sl_max = count;
newpsl->sl_count = count - IP_SFBLOCK;
if (psl) {
for (i=0; i<psl->sl_count; i++)
newpsl->sl_addr[i] = psl->sl_addr[i];
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
}
RCU_INIT_POINTER(pmc->sflist, newpsl);
psl = newpsl;
}
rv = 1; /* > 0 for insert logic below if sl_count is 0 */
for (i=0; i<psl->sl_count; i++) {
rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
sizeof(__be32));
if (rv == 0)
break;
}
if (rv == 0) /* address already present => error */
goto done;
for (j=psl->sl_count-1; j>=i; j--)
psl->sl_addr[j+1] = psl->sl_addr[j];
psl->sl_addr[i] = mreqs->imr_sourceaddr;
psl->sl_count++;
err = 0;
/* update the interface list */
ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
&mreqs->imr_sourceaddr, 1);
done:
rtnl_unlock();
if (leavegroup)
return ip_mc_leave_group(sk, &imr);
return err;
}
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
int err = 0;
struct ip_mreqn imr;
__be32 addr = msf->imsf_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *newpsl, *psl;
struct net *net = sock_net(sk);
int leavegroup = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
if (msf->imsf_fmode != MCAST_INCLUDE &&
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = ifindex;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
/* special case - (INCLUDE, empty) == LEAVE_GROUP */
if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
leavegroup = 1;
goto done;
}
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
}
if (!pmc) { /* must have a prior join */
err = -EINVAL;
goto done;
}
if (msf->imsf_numsrc) {
newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
GFP_KERNEL);
if (!newpsl) {
err = -ENOBUFS;
goto done;
}
newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
memcpy(newpsl->sl_addr, msf->imsf_slist,
msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
if (err) {
sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
goto done;
}
} else {
newpsl = NULL;
(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
msf->imsf_fmode, 0, NULL, 0);
}
psl = rtnl_dereference(pmc->sflist);
if (psl) {
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
psl->sl_count, psl->sl_addr, 0);
/* decrease mem now to avoid the memleak warning */
atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
kfree_rcu(psl, rcu);
} else
(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
0, NULL, 0);
RCU_INIT_POINTER(pmc->sflist, newpsl);
pmc->sfmode = msf->imsf_fmode;
err = 0;
done:
rtnl_unlock();
if (leavegroup)
err = ip_mc_leave_group(sk, &imr);
return err;
}
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct ip_msfilter __user *optval, int __user *optlen)
{
int err, len, count, copycount;
struct ip_mreqn imr;
__be32 addr = msf->imsf_multiaddr;
struct ip_mc_socklist *pmc;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
struct net *net = sock_net(sk);
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
goto done;
}
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
pmc->multi.imr_ifindex == imr.imr_ifindex)
break;
}
if (!pmc) /* must have a prior join */
goto done;
msf->imsf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
if (!psl) {
len = 0;
count = 0;
} else {
count = psl->sl_count;
}
copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
len = copycount * sizeof(psl->sl_addr[0]);
msf->imsf_numsrc = count;
if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
return -EFAULT;
}
if (len &&
copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
return -EFAULT;
return 0;
done:
rtnl_unlock();
return err;
}
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
struct group_filter __user *optval, int __user *optlen)
{
int err, i, count, copycount;
struct sockaddr_in *psin;
__be32 addr;
struct ip_mc_socklist *pmc;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
psin = (struct sockaddr_in *)&gsf->gf_group;
if (psin->sin_family != AF_INET)
return -EINVAL;
addr = psin->sin_addr.s_addr;
if (!ipv4_is_multicast(addr))
return -EINVAL;
rtnl_lock();
err = -EADDRNOTAVAIL;
for_each_pmc_rtnl(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == addr &&
pmc->multi.imr_ifindex == gsf->gf_interface)
break;
}
if (!pmc) /* must have a prior join */
goto done;
gsf->gf_fmode = pmc->sfmode;
psl = rtnl_dereference(pmc->sflist);
rtnl_unlock();
count = psl ? psl->sl_count : 0;
copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
gsf->gf_numsrc = count;
if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
return -EFAULT;
}
for (i=0; i<copycount; i++) {
struct sockaddr_storage ss;
psin = (struct sockaddr_in *)&ss;
memset(&ss, 0, sizeof(ss));
psin->sin_family = AF_INET;
psin->sin_addr.s_addr = psl->sl_addr[i];
if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
return -EFAULT;
}
return 0;
done:
rtnl_unlock();
return err;
}
/*
* check if a multicast source filter allows delivery for a given <src,dst,intf>
*/
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *pmc;
struct ip_sf_socklist *psl;
int i;
int ret;
ret = 1;
if (!ipv4_is_multicast(loc_addr))
goto out;
rcu_read_lock();
for_each_pmc_rcu(inet, pmc) {
if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
pmc->multi.imr_ifindex == dif)
break;
}
ret = inet->mc_all;
if (!pmc)
goto unlock;
psl = rcu_dereference(pmc->sflist);
ret = (pmc->sfmode == MCAST_EXCLUDE);
if (!psl)
goto unlock;
for (i=0; i<psl->sl_count; i++) {
if (psl->sl_addr[i] == rmt_addr)
break;
}
ret = 0;
if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
goto unlock;
if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
goto unlock;
ret = 1;
unlock:
rcu_read_unlock();
out:
return ret;
}
/*
* A socket is closing.
*/
void ip_mc_drop_socket(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
struct net *net = sock_net(sk);
if (inet->mc_list == NULL)
return;
rtnl_lock();
while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
struct in_device *in_dev;
inet->mc_list = iml->next_rcu;
in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL)
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
/* decrease mem now to avoid the memleak warning */
atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
kfree_rcu(iml, rcu);
}
rtnl_unlock();
}
/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
struct ip_mc_list *im;
struct ip_sf_list *psf;
int rv = 0;
for_each_pmc_rcu(in_dev, im) {
if (im->multiaddr == mc_addr)
break;
}
if (im && proto == IPPROTO_IGMP) {
rv = 1;
} else if (im) {
if (src_addr) {
for (psf=im->sources; psf; psf=psf->sf_next) {
if (psf->sf_inaddr == src_addr)
break;
}
if (psf)
rv = psf->sf_count[MCAST_INCLUDE] ||
psf->sf_count[MCAST_EXCLUDE] !=
im->sfcount[MCAST_EXCLUDE];
else
rv = im->sfcount[MCAST_EXCLUDE] != 0;
} else
rv = 1; /* unspecified source; tentatively allow */
}
return rv;
}
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
struct seq_net_private p;
struct net_device *dev;
struct in_device *in_dev;
};
#define igmp_mc_seq_private(seq) ((struct igmp_mc_iter_state *)(seq)->private)
static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ip_mc_list *im = NULL;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *in_dev;
in_dev = __in_dev_get_rcu(state->dev);
if (!in_dev)
continue;
im = rcu_dereference(in_dev->mc_list);
if (im) {
state->in_dev = in_dev;
break;
}
}
return im;
}
static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
im = rcu_dereference(im->next_rcu);
while (!im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->in_dev = NULL;
break;
}
state->in_dev = __in_dev_get_rcu(state->dev);
if (!state->in_dev)
continue;
im = rcu_dereference(state->in_dev->mc_list);
}
return im;
}
static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip_mc_list *im = igmp_mc_get_first(seq);
if (im)
while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
--pos;
return pos ? NULL : im;
}
static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_mc_list *im;
if (v == SEQ_START_TOKEN)
im = igmp_mc_get_first(seq);
else
im = igmp_mc_get_next(seq, v);
++*pos;
return im;
}
static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
state->in_dev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq,
"Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
else {
struct ip_mc_list *im = (struct ip_mc_list *)v;
struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
char *querier;
#ifdef CONFIG_IP_MULTICAST
querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
IGMP_V2_SEEN(state->in_dev) ? "V2" :
"V3";
#else
querier = "NONE";
#endif
if (rcu_dereference(state->in_dev->mc_list) == im) {
seq_printf(seq, "%d\t%-10s: %5d %7s\n",
state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
}
seq_printf(seq,
"\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
im->multiaddr, im->users,
im->tm_running, im->tm_running ?
jiffies_to_clock_t(im->timer.expires-jiffies) : 0,
im->reporter);
}
return 0;
}
static const struct seq_operations igmp_mc_seq_ops = {
.start = igmp_mc_seq_start,
.next = igmp_mc_seq_next,
.stop = igmp_mc_seq_stop,
.show = igmp_mc_seq_show,
};
static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &igmp_mc_seq_ops,
sizeof(struct igmp_mc_iter_state));
}
static const struct file_operations igmp_mc_seq_fops = {
.owner = THIS_MODULE,
.open = igmp_mc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
struct igmp_mcf_iter_state {
struct seq_net_private p;
struct net_device *dev;
struct in_device *idev;
struct ip_mc_list *im;
};
#define igmp_mcf_seq_private(seq) ((struct igmp_mcf_iter_state *)(seq)->private)
static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ip_sf_list *psf = NULL;
struct ip_mc_list *im = NULL;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
state->idev = NULL;
state->im = NULL;
for_each_netdev_rcu(net, state->dev) {
struct in_device *idev;
idev = __in_dev_get_rcu(state->dev);
if (unlikely(idev == NULL))
continue;
im = rcu_dereference(idev->mc_list);
if (likely(im != NULL)) {
spin_lock_bh(&im->lock);
psf = im->sources;
if (likely(psf != NULL)) {
state->im = im;
state->idev = idev;
break;
}
spin_unlock_bh(&im->lock);
}
}
return psf;
}
static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
psf = psf->sf_next;
while (!psf) {
spin_unlock_bh(&state->im->lock);
state->im = state->im->next;
while (!state->im) {
state->dev = next_net_device_rcu(state->dev);
if (!state->dev) {
state->idev = NULL;
goto out;
}
state->idev = __in_dev_get_rcu(state->dev);
if (!state->idev)
continue;
state->im = rcu_dereference(state->idev->mc_list);
}
if (!state->im)
break;
spin_lock_bh(&state->im->lock);
psf = state->im->sources;
}
out:
return psf;
}
static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
struct ip_sf_list *psf = igmp_mcf_get_first(seq);
if (psf)
while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
--pos;
return pos ? NULL : psf;
}
static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(rcu)
{
rcu_read_lock();
return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct ip_sf_list *psf;
if (v == SEQ_START_TOKEN)
psf = igmp_mcf_get_first(seq);
else
psf = igmp_mcf_get_next(seq, v);
++*pos;
return psf;
}
static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
__releases(rcu)
{
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (likely(state->im != NULL)) {
spin_unlock_bh(&state->im->lock);
state->im = NULL;
}
state->idev = NULL;
state->dev = NULL;
rcu_read_unlock();
}
static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
struct ip_sf_list *psf = (struct ip_sf_list *)v;
struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);
if (v == SEQ_START_TOKEN) {
seq_printf(seq,
"%3s %6s "
"%10s %10s %6s %6s\n", "Idx",
"Device", "MCA",
"SRC", "INC", "EXC");
} else {
seq_printf(seq,
"%3d %6.6s 0x%08x "
"0x%08x %6lu %6lu\n",
state->dev->ifindex, state->dev->name,
ntohl(state->im->multiaddr),
ntohl(psf->sf_inaddr),
psf->sf_count[MCAST_INCLUDE],
psf->sf_count[MCAST_EXCLUDE]);
}
return 0;
}
static const struct seq_operations igmp_mcf_seq_ops = {
.start = igmp_mcf_seq_start,
.next = igmp_mcf_seq_next,
.stop = igmp_mcf_seq_stop,
.show = igmp_mcf_seq_show,
};
static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &igmp_mcf_seq_ops,
sizeof(struct igmp_mcf_iter_state));
}
static const struct file_operations igmp_mcf_seq_fops = {
.owner = THIS_MODULE,
.open = igmp_mcf_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init igmp_net_init(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_net_fops_create(net, "igmp", S_IRUGO, &igmp_mc_seq_fops);
if (!pde)
goto out_igmp;
pde = proc_net_fops_create(net, "mcfilter", S_IRUGO, &igmp_mcf_seq_fops);
if (!pde)
goto out_mcfilter;
return 0;
out_mcfilter:
proc_net_remove(net, "igmp");
out_igmp:
return -ENOMEM;
}
static void __net_exit igmp_net_exit(struct net *net)
{
proc_net_remove(net, "mcfilter");
proc_net_remove(net, "igmp");
}
static struct pernet_operations igmp_net_ops = {
.init = igmp_net_init,
.exit = igmp_net_exit,
};
int __init igmp_mc_proc_init(void)
{
return register_pernet_subsys(&igmp_net_ops);
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3575_0 |
crossvul-cpp_data_good_5366_2 | /* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stddef.h>
#include <apr_strings.h>
#include <httpd.h>
#include <http_core.h>
#include <http_connection.h>
#include <http_log.h>
#include <nghttp2/nghttp2.h>
#include "h2_private.h"
#include "h2.h"
#include "h2_bucket_beam.h"
#include "h2_conn.h"
#include "h2_config.h"
#include "h2_h2.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_headers.h"
#include "h2_session.h"
#include "h2_stream.h"
#include "h2_task.h"
#include "h2_ctx.h"
#include "h2_task.h"
#include "h2_util.h"
static int state_transition[][7] = {
/* ID OP RL RR CI CO CL */
/*ID*/{ 1, 0, 0, 0, 0, 0, 0 },
/*OP*/{ 1, 1, 0, 0, 0, 0, 0 },
/*RL*/{ 0, 0, 1, 0, 0, 0, 0 },
/*RR*/{ 0, 0, 0, 1, 0, 0, 0 },
/*CI*/{ 1, 1, 0, 0, 1, 0, 0 },
/*CO*/{ 1, 1, 0, 0, 0, 1, 0 },
/*CL*/{ 1, 1, 0, 0, 1, 1, 1 },
};
static void H2_STREAM_OUT_LOG(int lvl, h2_stream *s, const char *tag)
{
if (APLOG_C_IS_LEVEL(s->session->c, lvl)) {
conn_rec *c = s->session->c;
char buffer[4 * 1024];
const char *line = "(null)";
apr_size_t len, bmax = sizeof(buffer)/sizeof(buffer[0]);
len = h2_util_bb_print(buffer, bmax, tag, "", s->out_buffer);
ap_log_cerror(APLOG_MARK, lvl, 0, c, "bb_dump(%s): %s",
c->log_id, len? buffer : line);
}
}
static int set_state(h2_stream *stream, h2_stream_state_t state)
{
int allowed = state_transition[state][stream->state];
if (allowed) {
stream->state = state;
return 1;
}
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, stream->session->c, APLOGNO(03081)
"h2_stream(%ld-%d): invalid state transition from %d to %d",
stream->session->id, stream->id, stream->state, state);
return 0;
}
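/*
 * Illustrative aside (not from mod_http2): the state_transition matrix
 * above is indexed as [new_state][current_state]; a 1 permits the move.
 * A minimal user-space sketch of the same pattern, with made-up states:
 */
#include <stdio.h>
enum demo_state { DS_IDLE, DS_OPEN, DS_CLOSED, DS_MAX };
static const int demo_transition[DS_MAX][DS_MAX] = {
    /* from:        IDLE OPEN CLOSED */
    /* to IDLE   */ { 1,   0,   0 },
    /* to OPEN   */ { 1,   1,   0 },
    /* to CLOSED */ { 1,   1,   1 },
};
static int demo_set_state(enum demo_state *cur, enum demo_state next)
{
    if (demo_transition[next][*cur]) {
        *cur = next;
        return 1;
    }
    return 0; /* caller logs the invalid transition, as set_state() does */
}
int main(void)
{
    enum demo_state s = DS_IDLE;
    printf("IDLE->OPEN:   %d\n", demo_set_state(&s, DS_OPEN));   /* 1 */
    printf("OPEN->IDLE:   %d\n", demo_set_state(&s, DS_IDLE));   /* 0 */
    printf("OPEN->CLOSED: %d\n", demo_set_state(&s, DS_CLOSED)); /* 1 */
    return 0;
}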
static int close_input(h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_CLOSED_INPUT:
case H2_STREAM_ST_CLOSED:
return 0; /* ignore, idempotent */
case H2_STREAM_ST_CLOSED_OUTPUT:
/* both closed now */
set_state(stream, H2_STREAM_ST_CLOSED);
break;
default:
        /* from all other states we transition here */
set_state(stream, H2_STREAM_ST_CLOSED_INPUT);
break;
}
return 1;
}
static int input_closed(h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_OPEN:
case H2_STREAM_ST_CLOSED_OUTPUT:
return 0;
default:
return 1;
}
}
static int close_output(h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_CLOSED_OUTPUT:
case H2_STREAM_ST_CLOSED:
return 0; /* ignore, idempotent */
case H2_STREAM_ST_CLOSED_INPUT:
/* both closed now */
set_state(stream, H2_STREAM_ST_CLOSED);
break;
default:
        /* from all other states we transition here */
set_state(stream, H2_STREAM_ST_CLOSED_OUTPUT);
break;
}
return 1;
}
static int input_open(const h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_OPEN:
case H2_STREAM_ST_CLOSED_OUTPUT:
return 1;
default:
return 0;
}
}
static int output_open(h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_OPEN:
case H2_STREAM_ST_CLOSED_INPUT:
return 1;
default:
return 0;
}
}
static void prep_output(h2_stream *stream) {
conn_rec *c = stream->session->c;
if (!stream->out_buffer) {
stream->out_buffer = apr_brigade_create(stream->pool, c->bucket_alloc);
}
}
static void prepend_response(h2_stream *stream, h2_headers *response)
{
conn_rec *c = stream->session->c;
apr_bucket *b;
prep_output(stream);
b = h2_bucket_headers_create(c->bucket_alloc, response);
APR_BRIGADE_INSERT_HEAD(stream->out_buffer, b);
}
static apr_status_t stream_pool_cleanup(void *ctx)
{
h2_stream *stream = ctx;
apr_status_t status;
ap_assert(stream->can_be_cleaned);
if (stream->files) {
apr_file_t *file;
int i;
for (i = 0; i < stream->files->nelts; ++i) {
file = APR_ARRAY_IDX(stream->files, i, apr_file_t*);
status = apr_file_close(file);
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, stream->session->c,
"h2_stream(%ld-%d): destroy, closed file %d",
stream->session->id, stream->id, i);
}
stream->files = NULL;
}
return APR_SUCCESS;
}
h2_stream *h2_stream_open(int id, apr_pool_t *pool, h2_session *session,
int initiated_on)
{
h2_stream *stream = apr_pcalloc(pool, sizeof(h2_stream));
stream->id = id;
stream->initiated_on = initiated_on;
stream->created = apr_time_now();
stream->state = H2_STREAM_ST_IDLE;
stream->pool = pool;
stream->session = session;
stream->can_be_cleaned = 1;
h2_beam_create(&stream->input, pool, id, "input", H2_BEAM_OWNER_SEND, 0);
h2_beam_create(&stream->output, pool, id, "output", H2_BEAM_OWNER_RECV, 0);
set_state(stream, H2_STREAM_ST_OPEN);
apr_pool_cleanup_register(pool, stream, stream_pool_cleanup,
apr_pool_cleanup_null);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03082)
"h2_stream(%ld-%d): opened", session->id, stream->id);
return stream;
}
void h2_stream_cleanup(h2_stream *stream)
{
apr_status_t status;
ap_assert(stream);
if (stream->out_buffer) {
/* remove any left over output buckets that may still have
* references into request pools */
apr_brigade_cleanup(stream->out_buffer);
}
h2_beam_abort(stream->input);
status = h2_beam_wait_empty(stream->input, APR_NONBLOCK_READ);
if (status == APR_EAGAIN) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, stream->session->c,
"h2_stream(%ld-%d): wait on input drain",
stream->session->id, stream->id);
status = h2_beam_wait_empty(stream->input, APR_BLOCK_READ);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
"h2_stream(%ld-%d): input drain returned",
stream->session->id, stream->id);
}
}
void h2_stream_destroy(h2_stream *stream)
{
ap_assert(stream);
ap_assert(!h2_mplx_stream_get(stream->session->mplx, stream->id));
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, stream->session->c,
"h2_stream(%ld-%d): destroy",
stream->session->id, stream->id);
stream->can_be_cleaned = 1;
if (stream->pool) {
apr_pool_destroy(stream->pool);
}
}
void h2_stream_eos_destroy(h2_stream *stream)
{
h2_session_stream_done(stream->session, stream);
/* stream possibly destroyed */
}
apr_pool_t *h2_stream_detach_pool(h2_stream *stream)
{
apr_pool_t *pool = stream->pool;
stream->pool = NULL;
return pool;
}
void h2_stream_rst(h2_stream *stream, int error_code)
{
stream->rst_error = error_code;
close_input(stream);
close_output(stream);
if (stream->out_buffer) {
apr_brigade_cleanup(stream->out_buffer);
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): reset, error=%d",
stream->session->id, stream->id, error_code);
}
apr_status_t h2_stream_set_request_rec(h2_stream *stream, request_rec *r)
{
h2_request *req;
apr_status_t status;
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp == NULL);
if (stream->rst_error) {
return APR_ECONNRESET;
}
status = h2_request_rcreate(&req, stream->pool, r);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, status, r, APLOGNO(03058)
"h2_request(%d): set_request_rec %s host=%s://%s%s",
stream->id, req->method, req->scheme, req->authority,
req->path);
stream->rtmp = req;
return status;
}
apr_status_t h2_stream_set_request(h2_stream *stream, const h2_request *r)
{
ap_assert(stream->request == NULL);
ap_assert(stream->rtmp == NULL);
stream->rtmp = h2_request_clone(stream->pool, r);
return APR_SUCCESS;
}
static apr_status_t add_trailer(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen)
{
conn_rec *c = stream->session->c;
char *hname, *hvalue;
if (nlen == 0 || name[0] == ':') {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, APR_EINVAL, c, APLOGNO(03060)
"h2_request(%ld-%d): pseudo header in trailer",
c->id, stream->id);
return APR_EINVAL;
}
if (h2_req_ignore_trailer(name, nlen)) {
return APR_SUCCESS;
}
if (!stream->trailers) {
stream->trailers = apr_table_make(stream->pool, 5);
}
hname = apr_pstrndup(stream->pool, name, nlen);
hvalue = apr_pstrndup(stream->pool, value, vlen);
h2_util_camel_case_header(hname, nlen);
apr_table_mergen(stream->trailers, hname, hvalue);
return APR_SUCCESS;
}
apr_status_t h2_stream_add_header(h2_stream *stream,
const char *name, size_t nlen,
const char *value, size_t vlen)
{
int error = 0;
ap_assert(stream);
if (stream->has_response) {
return APR_EINVAL;
}
++stream->request_headers_added;
if (name[0] == ':') {
if ((vlen) > stream->session->s->limit_req_line) {
/* pseudo header: approximation of request line size check */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): pseudo header %s too long",
stream->session->id, stream->id, name);
error = HTTP_REQUEST_URI_TOO_LARGE;
}
}
else if ((nlen + 2 + vlen) > stream->session->s->limit_req_fieldsize) {
/* header too long */
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): header %s too long",
stream->session->id, stream->id, name);
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}
if (stream->request_headers_added
> stream->session->s->limit_req_fields + 4) {
        /* too many header lines; the "+ 4" allows for the pseudo headers */
if (stream->request_headers_added
> stream->session->s->limit_req_fields + 4 + 100) {
/* yeah, right */
return APR_ECONNRESET;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): too many header lines",
stream->session->id, stream->id);
error = HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE;
}
if (h2_stream_is_scheduled(stream)) {
return add_trailer(stream, name, nlen, value, vlen);
}
else if (error) {
return h2_stream_set_error(stream, error);
}
else {
if (!stream->rtmp) {
stream->rtmp = h2_req_create(stream->id, stream->pool,
NULL, NULL, NULL, NULL, NULL, 0);
}
if (stream->state != H2_STREAM_ST_OPEN) {
return APR_ECONNRESET;
}
return h2_request_add_header(stream->rtmp, stream->pool,
name, nlen, value, vlen);
}
}
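/*
 * Illustrative aside (not from mod_http2): the nlen + 2 + vlen test in
 * h2_stream_add_header() approximates the HTTP/1.x serialized length of
 * a header, the "+ 2" standing for the ": " separator. A stand-alone
 * sketch of the same bound check, with hypothetical names and limits:
 */
#include <stdio.h>
#include <string.h>
static int demo_field_fits(const char *name, const char *value,
                           size_t limit_req_fieldsize)
{
    /* "Name: value" must fit the configured field-size limit */
    return strlen(name) + 2 + strlen(value) <= limit_req_fieldsize;
}
int main(void)
{
    printf("%d\n", demo_field_fits("Host", "example.org", 8190)); /* 1 */
    printf("%d\n", demo_field_fits("X", "y", 4)); /* 1: 1 + 2 + 1 == 4 */
    printf("%d\n", demo_field_fits("X", "y", 3)); /* 0: exceeds the limit */
    return 0;
}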
apr_status_t h2_stream_schedule(h2_stream *stream, int eos, int push_enabled,
h2_stream_pri_cmp *cmp, void *ctx)
{
apr_status_t status = APR_EINVAL;
ap_assert(stream);
ap_assert(stream->session);
ap_assert(stream->session->mplx);
if (!stream->scheduled) {
if (eos) {
close_input(stream);
}
if (h2_stream_is_ready(stream)) {
            /* already have a response, probably an HTTP error code */
return h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
}
else if (!stream->request && stream->rtmp) {
            /* This is the common case: an h2_request was being assembled; now
             * it gets finalized and checked for completeness */
status = h2_request_end_headers(stream->rtmp, stream->pool, eos);
if (status == APR_SUCCESS) {
stream->rtmp->serialize = h2_config_geti(stream->session->config,
H2_CONF_SER_HEADERS);
stream->request = stream->rtmp;
stream->rtmp = NULL;
stream->scheduled = 1;
stream->push_policy = h2_push_policy_determine(stream->request->headers,
stream->pool, push_enabled);
status = h2_mplx_process(stream->session->mplx, stream, cmp, ctx);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): scheduled %s %s://%s%s "
"chunked=%d",
stream->session->id, stream->id,
stream->request->method, stream->request->scheme,
stream->request->authority, stream->request->path,
stream->request->chunked);
return status;
}
}
else {
status = APR_ECONNRESET;
}
}
h2_stream_rst(stream, H2_ERR_INTERNAL_ERROR);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, stream->session->c,
"h2_stream(%ld-%d): RST=2 (internal err) %s %s://%s%s",
stream->session->id, stream->id,
stream->request->method, stream->request->scheme,
stream->request->authority, stream->request->path);
return status;
}
int h2_stream_is_scheduled(const h2_stream *stream)
{
return stream->scheduled;
}
apr_status_t h2_stream_close_input(h2_stream *stream)
{
conn_rec *c = stream->session->c;
apr_status_t status;
apr_bucket_brigade *tmp;
apr_bucket *b;
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): closing input",
stream->session->id, stream->id);
if (stream->rst_error) {
return APR_ECONNRESET;
}
tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
if (stream->trailers && !apr_is_empty_table(stream->trailers)) {
h2_headers *r = h2_headers_create(HTTP_OK, stream->trailers,
NULL, stream->pool);
b = h2_bucket_headers_create(c->bucket_alloc, r);
APR_BRIGADE_INSERT_TAIL(tmp, b);
stream->trailers = NULL;
}
b = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(tmp, b);
status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ);
apr_brigade_destroy(tmp);
return status;
}
apr_status_t h2_stream_write_data(h2_stream *stream,
const char *data, size_t len, int eos)
{
conn_rec *c = stream->session->c;
apr_status_t status = APR_SUCCESS;
apr_bucket_brigade *tmp;
ap_assert(stream);
if (!stream->input) {
return APR_EOF;
}
if (input_closed(stream) || !stream->request) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): writing denied, closed=%d, eoh=%d",
stream->session->id, stream->id, input_closed(stream),
stream->request != NULL);
return APR_EINVAL;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): add %ld input bytes",
stream->session->id, stream->id, (long)len);
tmp = apr_brigade_create(stream->pool, c->bucket_alloc);
apr_brigade_write(tmp, NULL, NULL, data, len);
status = h2_beam_send(stream->input, tmp, APR_BLOCK_READ);
apr_brigade_destroy(tmp);
stream->in_data_frames++;
stream->in_data_octets += len;
if (eos) {
return h2_stream_close_input(stream);
}
return status;
}
static apr_status_t fill_buffer(h2_stream *stream, apr_size_t amount)
{
conn_rec *c = stream->session->c;
apr_bucket *b;
apr_status_t status;
if (!stream->output) {
return APR_EOF;
}
status = h2_beam_receive(stream->output, stream->out_buffer,
APR_NONBLOCK_READ, amount);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, stream->session->c,
"h2_stream(%ld-%d): beam_received",
stream->session->id, stream->id);
    /* The buckets we receive use the stream->out_buffer pool for their
     * lifetime, which is exactly what we want since this is stream->pool.
     *
     * However: when we send these buckets down the core output filters,
     * a filter might decide to set them aside into a pool of its own,
     * and it might decide, after having sent the buckets, to clear that
     * pool.
     *
     * This is problematic for file buckets, because clearing the pool
     * closes the contained file. Any buckets split off and sent
     * afterwards will then fail with APR_EBADF.
     */
for (b = APR_BRIGADE_FIRST(stream->out_buffer);
b != APR_BRIGADE_SENTINEL(stream->out_buffer);
b = APR_BUCKET_NEXT(b)) {
if (APR_BUCKET_IS_FILE(b)) {
apr_bucket_file *f = (apr_bucket_file *)b->data;
apr_pool_t *fpool = apr_file_pool_get(f->fd);
if (fpool != c->pool) {
apr_bucket_setaside(b, c->pool);
if (!stream->files) {
stream->files = apr_array_make(stream->pool,
5, sizeof(apr_file_t*));
}
APR_ARRAY_PUSH(stream->files, apr_file_t*) = f->fd;
}
}
}
return status;
}
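/*
 * Illustrative aside (not from mod_http2): the setaside dance in
 * fill_buffer() exists because whoever owns the pool of a file bucket
 * controls the lifetime of its file descriptor. A rough user-space
 * analogy with plain POSIX fds: dup() gives the longer-lived owner an
 * independent descriptor, so closing the original does not invalidate
 * later reads. The input path below is a hypothetical example file.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
int main(void)
{
    char buf[4];
    int fd = open("/etc/hostname", O_RDONLY); /* hypothetical input file */
    if (fd < 0)
        return 1;
    int kept = dup(fd); /* "setaside": new owner, same open file */
    close(fd);          /* original owner cleans up its pool */
    /* the kept descriptor still works; without dup() this would be EBADF */
    printf("read: %zd\n", read(kept, buf, sizeof(buf)));
    close(kept);
    return 0;
}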
apr_status_t h2_stream_set_error(h2_stream *stream, int http_status)
{
h2_headers *response;
if (h2_stream_is_ready(stream)) {
return APR_EINVAL;
}
if (stream->rtmp) {
stream->request = stream->rtmp;
stream->rtmp = NULL;
}
response = h2_headers_die(http_status, stream->request, stream->pool);
prepend_response(stream, response);
h2_beam_close(stream->output);
return APR_SUCCESS;
}
static apr_bucket *get_first_headers_bucket(apr_bucket_brigade *bb)
{
if (bb) {
apr_bucket *b = APR_BRIGADE_FIRST(bb);
while (b != APR_BRIGADE_SENTINEL(bb)) {
if (H2_BUCKET_IS_HEADERS(b)) {
return b;
}
b = APR_BUCKET_NEXT(b);
}
}
return NULL;
}
apr_status_t h2_stream_out_prepare(h2_stream *stream, apr_off_t *plen,
int *peos, h2_headers **presponse)
{
conn_rec *c = stream->session->c;
apr_status_t status = APR_SUCCESS;
apr_off_t requested;
apr_bucket *b, *e;
if (presponse) {
*presponse = NULL;
}
if (stream->rst_error) {
*plen = 0;
*peos = 1;
return APR_ECONNRESET;
}
if (!output_open(stream)) {
return APR_ECONNRESET;
}
prep_output(stream);
if (*plen > 0) {
requested = H2MIN(*plen, H2_DATA_CHUNK_SIZE);
}
else {
requested = H2_DATA_CHUNK_SIZE;
}
*plen = requested;
H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_pre");
h2_util_bb_avail(stream->out_buffer, plen, peos);
if (!*peos && *plen < requested) {
/* try to get more data */
status = fill_buffer(stream, (requested - *plen) + H2_DATA_CHUNK_SIZE);
if (APR_STATUS_IS_EOF(status)) {
apr_bucket *eos = apr_bucket_eos_create(c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(stream->out_buffer, eos);
status = APR_SUCCESS;
}
else if (status == APR_EAGAIN) {
/* did not receive more, it's ok */
status = APR_SUCCESS;
}
*plen = requested;
h2_util_bb_avail(stream->out_buffer, plen, peos);
}
H2_STREAM_OUT_LOG(APLOG_TRACE2, stream, "h2_stream_out_prepare_post");
b = APR_BRIGADE_FIRST(stream->out_buffer);
while (b != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
e = APR_BUCKET_NEXT(b);
if (APR_BUCKET_IS_FLUSH(b)
|| (!APR_BUCKET_IS_METADATA(b) && b->length == 0)) {
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
}
else {
break;
}
b = e;
}
b = get_first_headers_bucket(stream->out_buffer);
if (b) {
/* there are HEADERS to submit */
*peos = 0;
*plen = 0;
if (b == APR_BRIGADE_FIRST(stream->out_buffer)) {
if (presponse) {
*presponse = h2_bucket_headers_get(b);
APR_BUCKET_REMOVE(b);
apr_bucket_destroy(b);
status = APR_SUCCESS;
}
else {
/* someone needs to retrieve the response first */
h2_mplx_keep_active(stream->session->mplx, stream->id);
status = APR_EAGAIN;
}
}
else {
apr_bucket *e = APR_BRIGADE_FIRST(stream->out_buffer);
while (e != APR_BRIGADE_SENTINEL(stream->out_buffer)) {
if (e == b) {
break;
}
else if (e->length != (apr_size_t)-1) {
*plen += e->length;
}
e = APR_BUCKET_NEXT(e);
}
}
}
if (!*peos && !*plen && status == APR_SUCCESS
&& (!presponse || !*presponse)) {
status = APR_EAGAIN;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, c,
"h2_stream(%ld-%d): prepare, len=%ld eos=%d",
c->id, stream->id, (long)*plen, *peos);
return status;
}
static int is_not_headers(apr_bucket *b)
{
return !H2_BUCKET_IS_HEADERS(b);
}
apr_status_t h2_stream_read_to(h2_stream *stream, apr_bucket_brigade *bb,
apr_off_t *plen, int *peos)
{
conn_rec *c = stream->session->c;
apr_status_t status = APR_SUCCESS;
if (stream->rst_error) {
return APR_ECONNRESET;
}
status = h2_append_brigade(bb, stream->out_buffer, plen, peos, is_not_headers);
if (status == APR_SUCCESS && !*peos && !*plen) {
status = APR_EAGAIN;
}
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
"h2_stream(%ld-%d): read_to, len=%ld eos=%d",
c->id, stream->id, (long)*plen, *peos);
return status;
}
int h2_stream_input_is_open(const h2_stream *stream)
{
return input_open(stream);
}
apr_status_t h2_stream_submit_pushes(h2_stream *stream, h2_headers *response)
{
apr_status_t status = APR_SUCCESS;
apr_array_header_t *pushes;
int i;
pushes = h2_push_collect_update(stream, stream->request, response);
if (pushes && !apr_is_empty_array(pushes)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, stream->session->c,
"h2_stream(%ld-%d): found %d push candidates",
stream->session->id, stream->id, pushes->nelts);
for (i = 0; i < pushes->nelts; ++i) {
h2_push *push = APR_ARRAY_IDX(pushes, i, h2_push*);
h2_stream *s = h2_session_push(stream->session, stream, push);
if (!s) {
status = APR_ECONNRESET;
break;
}
}
}
return status;
}
apr_table_t *h2_stream_get_trailers(h2_stream *stream)
{
return NULL;
}
const h2_priority *h2_stream_get_priority(h2_stream *stream,
h2_headers *response)
{
if (response && stream->initiated_on) {
const char *ctype = apr_table_get(response->headers, "content-type");
if (ctype) {
/* FIXME: Not good enough, config needs to come from request->server */
return h2_config_get_priority(stream->session->config, ctype);
}
}
return NULL;
}
const char *h2_stream_state_str(h2_stream *stream)
{
switch (stream->state) {
case H2_STREAM_ST_IDLE:
return "IDLE";
case H2_STREAM_ST_OPEN:
return "OPEN";
case H2_STREAM_ST_RESV_LOCAL:
return "RESERVED_LOCAL";
case H2_STREAM_ST_RESV_REMOTE:
return "RESERVED_REMOTE";
case H2_STREAM_ST_CLOSED_INPUT:
return "HALF_CLOSED_REMOTE";
case H2_STREAM_ST_CLOSED_OUTPUT:
return "HALF_CLOSED_LOCAL";
case H2_STREAM_ST_CLOSED:
return "CLOSED";
default:
return "UNKNOWN";
}
}
int h2_stream_is_ready(h2_stream *stream)
{
if (stream->has_response) {
return 1;
}
else if (stream->out_buffer && get_first_headers_bucket(stream->out_buffer)) {
return 1;
}
return 0;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5366_2 |
crossvul-cpp_data_bad_3486_19 | /*
* 'traps.c' handles hardware traps and faults after we have saved some
* state in 'entry.S'.
*
* SuperH version: Copyright (C) 1999 Niibe Yutaka
* Copyright (C) 2000 Philipp Rumpf
* Copyright (C) 2000 David Howells
* Copyright (C) 2002 - 2010 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <asm/system.h>
#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST 4
# define TRAP_ILLEGAL_SLOT_INST 6
# define TRAP_ADDRESS_ERROR 9
# ifdef CONFIG_CPU_SH2A
# define TRAP_UBC 12
# define TRAP_FPU_ERROR 13
# define TRAP_DIVZERO_ERROR 17
# define TRAP_DIVOVF_ERROR 18
# endif
#else
#define TRAP_RESERVED_INST 12
#define TRAP_ILLEGAL_SLOT_INST 13
#endif
static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
{
unsigned long p;
int i;
printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
for (p = bottom & ~31; p < top; ) {
printk("%04lx: ", p & 0xffff);
for (i = 0; i < 8; i++, p += 4) {
unsigned int val;
if (p < bottom || p >= top)
printk(" ");
else {
if (__get_user(val, (unsigned int __user *)p)) {
printk("\n");
return;
}
printk("%08x ", val);
}
}
printk("\n");
}
}
static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
static int die_counter;
oops_enter();
spin_lock_irq(&die_lock);
console_verbose();
bust_spinlocks(1);
printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
print_modules();
show_regs(regs);
printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
task_pid_nr(current), task_stack_page(current) + 1);
if (!user_mode(regs) || in_interrupt())
dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
(unsigned long)task_stack_page(current));
notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV);
bust_spinlocks(0);
add_taint(TAINT_DIE);
spin_unlock_irq(&die_lock);
oops_exit();
if (kexec_should_crash(current))
crash_kexec(regs);
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char *str, struct pt_regs *regs,
long err)
{
if (!user_mode(regs))
die(str, regs, err);
}
/*
* try and fix up kernelspace address errors
* - userspace errors just cause EFAULT to be returned, resulting in SEGV
* - kernel/userspace interfaces cause a jump to an appropriate handler
* - other kernel errors are bad
*/
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
if (!user_mode(regs)) {
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return;
}
die(str, regs, err);
}
}
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
if ((count == 1) && dst[0] & 0x80) {
dst[1] = 0xff;
dst[2] = 0xff;
dst[3] = 0xff;
}
if ((count == 2) && dst[1] & 0x80) {
dst[2] = 0xff;
dst[3] = 0xff;
}
#else
if ((count == 1) && dst[3] & 0x80) {
dst[2] = 0xff;
dst[1] = 0xff;
dst[0] = 0xff;
}
if ((count == 2) && dst[2] & 0x80) {
dst[1] = 0xff;
dst[0] = 0xff;
}
#endif
}
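/*
 * Illustrative aside (not part of the kernel source): a stand-alone
 * little-endian sketch of the widening done by sign_extend() above.
 * A 1- or 2-byte value is copied into the low bytes of a zeroed
 * 4-byte buffer; if its sign bit is set, the upper bytes are filled
 * with 0xff so the 32-bit result reads back as the same negative value.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
int main(void)
{
	unsigned char dst[4] = { 0, 0, 0, 0 };
	int16_t src = -2;		/* 0xfffe */
	int32_t out;
	memcpy(dst, &src, 2);		/* low two bytes only */
	if (dst[1] & 0x80) {		/* the count == 2 case above */
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	memcpy(&out, dst, 4);
	printf("%d\n", out);		/* prints -2 on little-endian hosts */
	return 0;
}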
static struct mem_access user_mem_access = {
copy_from_user,
copy_to_user,
};
/*
* handle an instruction that does an unaligned memory access by emulating the
* desired behaviour
* - note that PC _may not_ point to the faulting instruction
* (if that instruction is in a branch delay slot)
* - return 0 if emulation okay, -EFAULT on existential error
*/
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma)
{
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
	rn = &regs->regs[index];
	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];
count = 1<<(instruction&3);
switch (count) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
ret = -EFAULT;
switch (instruction>>12) {
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
srcu = (unsigned char __user *)*rm;
srcu += regs->regs[0];
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
dstu = (unsigned char __user *)*rn;
dstu += regs->regs[0];
if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
break;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
dstu += (instruction&0x000F)<<2;
if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 2;
dst = (unsigned char *)rn;
*(unsigned long *)dst = 0;
if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
*(unsigned long*)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
break;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
dstu += (instruction & 0x000F) << 1;
if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
srcu = (unsigned char __user *)*rm;
srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *) &regs->regs[0];
*(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
break;
}
break;
}
return ret;
fetch_fault:
/* Argh. Address not only misaligned but also non-existent.
* Raise an EFAULT and see if it's trapped
*/
die_if_no_fixup("Fault in unaligned fixup", regs, 0);
return -EFAULT;
}
/*
* emulate the instruction in the delay slot
* - fetches the instruction from PC+2
*/
static inline int handle_delayslot(struct pt_regs *regs,
insn_size_t old_instruction,
struct mem_access *ma)
{
insn_size_t instruction;
void __user *addr = (void __user *)(regs->pc +
instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
if (user_mode(regs))
return -EFAULT;
/* kernel */
die("delay-slot-insn faulting in handle_unaligned_delayslot",
regs, 0);
}
return handle_unaligned_ins(instruction, regs, ma);
}
/*
* handle an instruction that does an unaligned memory access
* - have to be careful of branch delay-slot instructions that fault
* SH3:
* - if the branch would be taken PC points to the branch
* - if the branch would not be taken, PC points to delay-slot
* SH4:
* - PC always points to delayed branch
* - return 0 if handled, -EFAULT if failed (may not return if in kernel)
*/
/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
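/*
 * Illustrative aside (not part of the kernel source): the two macros
 * above recover signed branch displacements from an instruction word.
 * The 8-bit form sign-extends the low byte and doubles it; the 12-bit
 * form shifts the field into a signed short's top bits and back down,
 * which both sign-extends and multiplies by two. A quick stand-alone
 * check with copies of the same expressions:
 */
#include <stdio.h>
#define DEMO_PC_8BIT_OFFSET(instr)  ((((signed char)(instr))*2) + 4)
#define DEMO_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
int main(void)
{
	/* bra with displacement -1 (0xfff) and +1 (0x001) */
	printf("%d\n", DEMO_PC_12BIT_OFFSET(0x0fff)); /* -2 + 4 == 2 */
	printf("%d\n", DEMO_PC_12BIT_OFFSET(0x0001)); /*  2 + 4 == 6 */
	/* bt/bf with displacement -1 (0xff) */
	printf("%d\n", DEMO_PC_8BIT_OFFSET(0xff));    /* -2 + 4 == 2 */
	return 0;
}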
int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
struct mem_access *ma, int expected,
unsigned long address)
{
u_int rm;
int ret, index;
/*
* XXX: We can't handle mixed 16/32-bit instructions yet
*/
if (instruction_size(instruction) != 2)
return -EINVAL;
index = (instruction>>8)&15; /* 0x0F00 */
rm = regs->regs[index];
/*
* Log the unexpected fixups, and then pass them on to perf.
*
* We intentionally don't report the expected cases to perf as
* otherwise the trapped I/O case will skew the results too much
* to be useful.
*/
if (!expected) {
unaligned_fixups_notify(current, instruction, regs);
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0,
regs, address);
}
ret = -EFAULT;
switch (instruction&0xF000) {
case 0x0000:
if (instruction==0x000B) {
/* rts */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = regs->pr;
}
else if ((instruction&0x00FF)==0x0023) {
/* braf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += rm + 4;
}
else if ((instruction&0x00FF)==0x0003) {
/* bsrf @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += rm + 4;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x1000: /* mov.l Rm,@(disp,Rn) */
goto simple;
case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
goto simple;
case 0x4000:
if ((instruction&0x00FF)==0x002B) {
/* jmp @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc = rm;
}
else if ((instruction&0x00FF)==0x000B) {
/* jsr @Rm */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc = rm;
}
}
else {
/* mov.[bwl] to/from memory via r0+rn */
goto simple;
}
break;
case 0x5000: /* mov.l @(disp,Rm),Rn */
goto simple;
case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
goto simple;
case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
switch (instruction&0x0F00) {
case 0x0100: /* mov.w R0,@(disp,Rm) */
goto simple;
case 0x0500: /* mov.w @(disp,Rm),R0 */
goto simple;
case 0x0B00: /* bf lab - no delayslot*/
break;
case 0x0F00: /* bf/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) != 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
case 0x0900: /* bt lab - no delayslot */
break;
case 0x0D00: /* bt/s lab */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
if ((regs->sr & 0x00000001) == 0)
regs->pc += 4; /* next after slot */
else
#endif
regs->pc += SH_PC_8BIT_OFFSET(instruction);
}
break;
}
break;
case 0xA000: /* bra label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0)
regs->pc += SH_PC_12BIT_OFFSET(instruction);
break;
case 0xB000: /* bsr label */
ret = handle_delayslot(regs, instruction, ma);
if (ret==0) {
regs->pr = regs->pc + 4;
regs->pc += SH_PC_12BIT_OFFSET(instruction);
}
break;
}
return ret;
/* handle non-delay-slot instruction */
simple:
ret = handle_unaligned_ins(instruction, regs, ma);
if (ret==0)
regs->pc += instruction_size(instruction);
return ret;
}
/*
* Handle various address error exceptions:
* - instruction address error:
* misaligned PC
* PC >= 0x80000000 in user mode
* - data address error (read and write)
* misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address error
* and data address errors caused by read accesses.
*/
asmlinkage void do_address_error(struct pt_regs *regs,
unsigned long writeaccess,
unsigned long address)
{
unsigned long error_code = 0;
mm_segment_t oldfs;
siginfo_t info;
insn_size_t instruction;
int tmp;
/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
error_code = lookup_exception_vector();
#endif
oldfs = get_fs();
if (user_mode(regs)) {
int si_code = BUS_ADRERR;
unsigned int user_action;
local_irq_enable();
inc_unaligned_user_access();
set_fs(USER_DS);
if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
sizeof(instruction))) {
set_fs(oldfs);
goto uspace_segv;
}
set_fs(oldfs);
/* shout about userspace fixups */
unaligned_fixups_notify(current, instruction, regs);
user_action = unaligned_user_action();
if (user_action & UM_FIXUP)
goto fixup;
if (user_action & UM_SIGNAL)
goto uspace_segv;
else {
/* ignore */
regs->pc += instruction_size(instruction);
return;
}
fixup:
/* bad PC is not something we can fix */
if (regs->pc & 1) {
si_code = BUS_ADRALN;
goto uspace_segv;
}
set_fs(USER_DS);
tmp = handle_unaligned_access(instruction, regs,
&user_mem_access, 0,
address);
set_fs(oldfs);
if (tmp == 0)
return; /* sorted */
uspace_segv:
printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
"access (PC %lx PR %lx)\n", current->comm, regs->pc,
regs->pr);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = si_code;
info.si_addr = (void __user *)address;
force_sig_info(SIGBUS, &info, current);
} else {
inc_unaligned_kernel_access();
if (regs->pc & 1)
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
			/* Argh. Fault on the instruction itself.
			   This should never happen on non-SMP systems.
			*/
set_fs(oldfs);
die("insn faulting in do_address_error", regs, 0);
}
unaligned_fixups_notify(current, instruction, regs);
handle_unaligned_access(instruction, regs, &user_mem_access,
0, address);
set_fs(oldfs);
}
}
#ifdef CONFIG_SH_DSP
/*
* SH-DSP support gerg@snapgear.com.
*/
int is_dsp_inst(struct pt_regs *regs)
{
unsigned short inst = 0;
/*
* Safe guard if DSP mode is already enabled or we're lacking
* the DSP altogether.
*/
if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
return 0;
get_user(inst, ((unsigned short *) regs->pc));
inst &= 0xf000;
/* Check for any type of DSP or support instruction */
if ((inst == 0xf000) || (inst == 0x4000))
return 1;
return 0;
}
#else
#define is_dsp_inst(regs) (0)
#endif /* CONFIG_SH_DSP */
#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
siginfo_t info;
switch (r4) {
case TRAP_DIVZERO_ERROR:
info.si_code = FPE_INTDIV;
break;
case TRAP_DIVOVF_ERROR:
info.si_code = FPE_INTOVF;
break;
}
force_sig_info(SIGFPE, &info, current);
}
#endif
asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long error_code;
struct task_struct *tsk = current;
#ifdef CONFIG_SH_FPU_EMU
unsigned short inst = 0;
int err;
get_user(inst, (unsigned short*)regs->pc);
err = do_fpu_inst(inst, regs);
if (!err) {
regs->pc += instruction_size(inst);
return;
}
/* not a FPU inst. */
#endif
#ifdef CONFIG_SH_DSP
/* Check if it's a DSP instruction */
if (is_dsp_inst(regs)) {
/* Enable DSP mode, and restart instruction. */
regs->sr |= SR_DSP;
/* Save DSP mode */
tsk->thread.dsp_status.status |= SR_DSP;
return;
}
#endif
error_code = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("reserved instruction", regs, error_code);
}
#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
/*
* bfs: 8fxx: PC+=d*2+4;
* bts: 8dxx: PC+=d*2+4;
* bra: axxx: PC+=D*2+4;
* bsr: bxxx: PC+=D*2+4 after PR=PC+4;
* braf:0x23: PC+=Rn*2+4;
* bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
* jmp: 4x2b: PC=Rn;
* jsr: 4x0b: PC=Rn after PR=PC+4;
* rts: 000b: PC=PR;
*/
if (((inst & 0xf000) == 0xb000) || /* bsr */
((inst & 0xf0ff) == 0x0003) || /* bsrf */
((inst & 0xf0ff) == 0x400b)) /* jsr */
regs->pr = regs->pc + 4;
if ((inst & 0xfd00) == 0x8d00) { /* bfs, bts */
regs->pc += SH_PC_8BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xe000) == 0xa000) { /* bra, bsr */
regs->pc += SH_PC_12BIT_OFFSET(inst);
return 0;
}
if ((inst & 0xf0df) == 0x0003) { /* braf, bsrf */
regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
return 0;
}
if ((inst & 0xf0df) == 0x400b) { /* jmp, jsr */
regs->pc = regs->regs[(inst & 0x0f00) >> 8];
return 0;
}
if ((inst & 0xffff) == 0x000b) { /* rts */
regs->pc = regs->pr;
return 0;
}
return 1;
}
#endif
asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
unsigned long inst;
struct task_struct *tsk = current;
if (kprobe_handle_illslot(regs->pc) == 0)
return;
#ifdef CONFIG_SH_FPU_EMU
get_user(inst, (unsigned short *)regs->pc + 1);
if (!do_fpu_inst(inst, regs)) {
get_user(inst, (unsigned short *)regs->pc);
if (!emulate_branch(inst, regs))
return;
/* fault in branch.*/
}
/* not a FPU inst. */
#endif
inst = lookup_exception_vector();
local_irq_enable();
force_sig(SIGILL, tsk);
die_if_no_fixup("illegal slot instruction", regs, inst);
}
asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs __regs)
{
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
long ex;
ex = lookup_exception_vector();
die_if_kernel("exception", regs, ex);
}
void __cpuinit per_cpu_trap_init(void)
{
extern void *vbr_base;
	/* NOTE: The VBR value should be in P1
	   (or P2, the virtual "fixed" address space).
	   It definitely should not be a physical address. */
asm volatile("ldc %0, vbr"
: /* no output */
: "r" (&vbr_base)
: "memory");
/* disable exception blocking now when the vbr has been setup */
clear_bl_bit();
}
void *set_exception_table_vec(unsigned int vec, void *handler)
{
extern void *exception_handling_table[];
void *old_handler;
old_handler = exception_handling_table[vec];
exception_handling_table[vec] = handler;
return old_handler;
}
void __init trap_init(void)
{
set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
defined(CONFIG_SH_FPU_EMU)
/*
* For SH-4 lacking an FPU, treat floating point instructions as
* reserved. They'll be handled in the math-emu case, or faulted on
* otherwise.
*/
set_exception_table_evt(0x800, do_reserved_inst);
set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2
set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif
#ifdef TRAP_UBC
set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
unsigned long stack;
if (!tsk)
tsk = current;
if (tsk == current)
sp = (unsigned long *)current_stack_pointer;
else
sp = (unsigned long *)tsk->thread.sp;
stack = (unsigned long)sp;
dump_mem("Stack: ", stack, THREAD_SIZE +
(unsigned long)task_stack_page(tsk));
show_trace(tsk, sp, NULL);
}
void dump_stack(void)
{
show_stack(NULL, NULL);
}
EXPORT_SYMBOL(dump_stack);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_19 |
crossvul-cpp_data_good_1716_0 | /*
* Performance counter callchain support - powerpc architecture code
*
* Copyright © 2009 Paul Mackerras, IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#ifdef CONFIG_PPC64
#include "../kernel/ppc32.h"
#endif
/*
* Is sp valid as the address of the next kernel stack frame after prev_sp?
* The next frame may be in a different stack area but should not go
* back down in the same stack area.
*/
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
if (sp & 0xf)
return 0; /* must be 16-byte aligned */
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return 0;
if (sp >= prev_sp + STACK_FRAME_MIN_SIZE)
return 1;
/*
* sp could decrease when we jump off an interrupt stack
* back to the regular process stack.
*/
if ((sp & ~(THREAD_SIZE - 1)) != (prev_sp & ~(THREAD_SIZE - 1)))
return 1;
return 0;
}
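/*
 * Illustrative aside (not part of the kernel source): the last test in
 * valid_next_sp() masks both pointers down to their stack-area base with
 * ~(THREAD_SIZE - 1); if the bases differ, the walk has hopped to a
 * different (interrupt vs. process) stack, so a lower sp is acceptable.
 * A user-space sketch with a hypothetical 8 KiB, power-of-two area:
 */
#include <stdio.h>
#define DEMO_THREAD_SIZE 8192UL
static int demo_same_stack_area(unsigned long a, unsigned long b)
{
	return (a & ~(DEMO_THREAD_SIZE - 1)) == (b & ~(DEMO_THREAD_SIZE - 1));
}
int main(void)
{
	printf("%d\n", demo_same_stack_area(0x10001f00, 0x10001a00)); /* 1 */
	printf("%d\n", demo_same_stack_area(0x10001f00, 0x20003a00)); /* 0 */
	return 0;
}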
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
unsigned long lr;
long level = 0;
unsigned long *fp;
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, perf_instruction_pointer(regs));
if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD))
return;
for (;;) {
fp = (unsigned long *) sp;
next_sp = fp[0];
if (next_sp == sp + STACK_INT_FRAME_SIZE &&
fp[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
/*
* This looks like an interrupt frame for an
* interrupt that occurred in the kernel
*/
regs = (struct pt_regs *)(sp + STACK_FRAME_OVERHEAD);
next_ip = regs->nip;
lr = regs->link;
level = 0;
perf_callchain_store(entry, PERF_CONTEXT_KERNEL);
} else {
if (level == 0)
next_ip = lr;
else
next_ip = fp[STACK_FRAME_LR_SAVE];
/*
* We can't tell which of the first two addresses
* we get are valid, but we can filter out the
* obviously bogus ones here. We replace them
* with 0 rather than removing them entirely so
* that userspace can tell which is which.
*/
if ((level == 1 && next_ip == lr) ||
(level <= 1 && !kernel_text_address(next_ip)))
next_ip = 0;
++level;
}
perf_callchain_store(entry, next_ip);
if (!valid_next_sp(next_sp, sp))
return;
sp = next_sp;
}
}
#ifdef CONFIG_PPC64
/*
* On 64-bit we don't want to invoke hash_page on user addresses from
* interrupt context, so if the access faults, we read the page tables
* to find which page (if any) is mapped and access it directly.
*/
static int read_user_stack_slow(void __user *ptr, void *ret, int nb)
{
pgd_t *pgdir;
pte_t *ptep, pte;
unsigned shift;
unsigned long addr = (unsigned long) ptr;
unsigned long offset;
unsigned long pfn;
void *kaddr;
pgdir = current->mm->pgd;
if (!pgdir)
return -EFAULT;
ptep = find_linux_pte_or_hugepte(pgdir, addr, &shift);
if (!shift)
shift = PAGE_SHIFT;
/* align address to page boundary */
offset = addr & ((1UL << shift) - 1);
addr -= offset;
if (ptep == NULL)
return -EFAULT;
pte = *ptep;
if (!pte_present(pte) || !(pte_val(pte) & _PAGE_USER))
return -EFAULT;
pfn = pte_pfn(pte);
if (!page_is_ram(pfn))
return -EFAULT;
/* no highmem to worry about here */
kaddr = pfn_to_kaddr(pfn);
memcpy(ret, kaddr + offset, nb);
return 0;
}
static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
{
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned long) ||
((unsigned long)ptr & 7))
return -EFAULT;
pagefault_disable();
if (!__get_user_inatomic(*ret, ptr)) {
pagefault_enable();
return 0;
}
pagefault_enable();
return read_user_stack_slow(ptr, ret, 8);
}
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
((unsigned long)ptr & 3))
return -EFAULT;
pagefault_disable();
if (!__get_user_inatomic(*ret, ptr)) {
pagefault_enable();
return 0;
}
pagefault_enable();
return read_user_stack_slow(ptr, ret, 4);
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
if (!sp || (sp & 7) || sp > (is_64 ? TASK_SIZE : 0x100000000UL) - 32)
return 0;
return 1;
}
/*
* 64-bit user processes use the same stack frame for RT and non-RT signals.
*/
struct signal_frame_64 {
char dummy[__SIGNAL_FRAMESIZE];
struct ucontext uc;
unsigned long unused[2];
unsigned int tramp[6];
struct siginfo *pinfo;
void *puc;
struct siginfo info;
char abigap[288];
};
static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
if (nip == fp + offsetof(struct signal_frame_64, tramp))
return 1;
if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
return 1;
return 0;
}
/*
* Do some sanity checking on the signal frame pointed to by sp.
* We check the pinfo and puc pointers in the frame.
*/
static int sane_signal_64_frame(unsigned long sp)
{
struct signal_frame_64 __user *sf;
unsigned long pinfo, puc;
sf = (struct signal_frame_64 __user *) sp;
if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
return 0;
return pinfo == (unsigned long) &sf->info &&
puc == (unsigned long) &sf->uc;
}
static void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp, next_sp;
unsigned long next_ip;
unsigned long lr;
long level = 0;
struct signal_frame_64 __user *sigframe;
unsigned long __user *fp, *uregs;
next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
return;
/*
* Note: the next_sp - sp >= signal frame size check
* is true when next_sp < sp, which can happen when
* transitioning from an alternate signal stack to the
* normal stack.
*/
if (next_sp - sp >= sizeof(struct signal_frame_64) &&
(is_sigreturn_64_address(next_ip, sp) ||
(level <= 1 && is_sigreturn_64_address(lr, sp))) &&
sane_signal_64_frame(sp)) {
/*
			 * This looks like a signal frame
*/
sigframe = (struct signal_frame_64 __user *) sp;
uregs = sigframe->uc.uc_mcontext.gp_regs;
if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
read_user_stack_64(&uregs[PT_LNK], &lr) ||
read_user_stack_64(&uregs[PT_R1], &sp))
return;
level = 0;
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
static inline int current_is_64bit(void)
{
/*
* We can't use test_thread_flag() here because we may be on an
* interrupt stack, and the thread flags don't get copied over
* from the thread_info on the main stack to the interrupt stack.
*/
return !test_ti_thread_flag(task_thread_info(current), TIF_32BIT);
}
#else /* CONFIG_PPC64 */
/*
* On 32-bit we just access the address and let hash_page create a
* HPTE if necessary, so there is no need to fall back to reading
* the page tables. Since this is called at interrupt level,
* do_page_fault() won't treat a DSI as a page fault.
*/
static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
{
int rc;
if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
((unsigned long)ptr & 3))
return -EFAULT;
pagefault_disable();
rc = __get_user_inatomic(*ret, ptr);
pagefault_enable();
return rc;
}
static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
}
static inline int current_is_64bit(void)
{
return 0;
}
static inline int valid_user_sp(unsigned long sp, int is_64)
{
if (!sp || (sp & 7) || sp > TASK_SIZE - 32)
return 0;
return 1;
}
#define __SIGNAL_FRAMESIZE32 __SIGNAL_FRAMESIZE
#define sigcontext32 sigcontext
#define mcontext32 mcontext
#define ucontext32 ucontext
#define compat_siginfo_t struct siginfo
#endif /* CONFIG_PPC64 */
/*
* Layout for non-RT signal frames
*/
struct signal_frame_32 {
char dummy[__SIGNAL_FRAMESIZE32];
struct sigcontext32 sctx;
struct mcontext32 mctx;
int abigap[56];
};
/*
* Layout for RT signal frames
*/
struct rt_signal_frame_32 {
char dummy[__SIGNAL_FRAMESIZE32 + 16];
compat_siginfo_t info;
struct ucontext32 uc;
int abigap[56];
};
static int is_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
if (nip == fp + offsetof(struct signal_frame_32, mctx.mc_pad))
return 1;
if (vdso32_sigtramp && current->mm->context.vdso_base &&
nip == current->mm->context.vdso_base + vdso32_sigtramp)
return 1;
return 0;
}
static int is_rt_sigreturn_32_address(unsigned int nip, unsigned int fp)
{
if (nip == fp + offsetof(struct rt_signal_frame_32,
uc.uc_mcontext.mc_pad))
return 1;
if (vdso32_rt_sigtramp && current->mm->context.vdso_base &&
nip == current->mm->context.vdso_base + vdso32_rt_sigtramp)
return 1;
return 0;
}
static int sane_signal_32_frame(unsigned int sp)
{
struct signal_frame_32 __user *sf;
unsigned int regs;
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->sctx.regs, &regs))
return 0;
return regs == (unsigned long) &sf->mctx;
}
static int sane_rt_signal_32_frame(unsigned int sp)
{
struct rt_signal_frame_32 __user *sf;
unsigned int regs;
sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
	if (read_user_stack_32((unsigned int __user *) &sf->uc.uc_regs, &regs))
return 0;
return regs == (unsigned long) &sf->uc.uc_mcontext;
}
static unsigned int __user *signal_frame_32_regs(unsigned int sp,
unsigned int next_sp, unsigned int next_ip)
{
struct mcontext32 __user *mctx = NULL;
struct signal_frame_32 __user *sf;
struct rt_signal_frame_32 __user *rt_sf;
/*
* Note: the next_sp - sp >= signal frame size check
* is true when next_sp < sp, for example, when
* transitioning from an alternate signal stack to the
* normal stack.
*/
if (next_sp - sp >= sizeof(struct signal_frame_32) &&
is_sigreturn_32_address(next_ip, sp) &&
sane_signal_32_frame(sp)) {
sf = (struct signal_frame_32 __user *) (unsigned long) sp;
mctx = &sf->mctx;
}
if (!mctx && next_sp - sp >= sizeof(struct rt_signal_frame_32) &&
is_rt_sigreturn_32_address(next_ip, sp) &&
sane_rt_signal_32_frame(sp)) {
rt_sf = (struct rt_signal_frame_32 __user *) (unsigned long) sp;
mctx = &rt_sf->uc.uc_mcontext;
}
if (!mctx)
return NULL;
return mctx->mc_gregs;
}
static void perf_callchain_user_32(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned int sp, next_sp;
unsigned int next_ip;
unsigned int lr;
long level = 0;
unsigned int __user *fp, *uregs;
next_ip = perf_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned int __user *) (unsigned long) sp;
if (!valid_user_sp(sp, 0) || read_user_stack_32(fp, &next_sp))
return;
if (level > 0 && read_user_stack_32(&fp[1], &next_ip))
return;
uregs = signal_frame_32_regs(sp, next_sp, next_ip);
if (!uregs && level <= 1)
uregs = signal_frame_32_regs(sp, next_sp, lr);
if (uregs) {
/*
			 * This looks like a signal frame, so restart
* the stack trace with the values in it.
*/
if (read_user_stack_32(&uregs[PT_NIP], &next_ip) ||
read_user_stack_32(&uregs[PT_LNK], &lr) ||
read_user_stack_32(&uregs[PT_R1], &sp))
return;
level = 0;
perf_callchain_store(entry, PERF_CONTEXT_USER);
perf_callchain_store(entry, next_ip);
continue;
}
if (level == 0)
next_ip = lr;
perf_callchain_store(entry, next_ip);
++level;
sp = next_sp;
}
}
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
if (current_is_64bit())
perf_callchain_user_64(entry, regs);
else
perf_callchain_user_32(entry, regs);
}
/*
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
/*
* This routine will put a word on the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
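/*
 * Usage sketch (not part of the original file): the offsets passed to
 * these helpers are byte offsets into struct pt_regs, the same ones
 * PTRACE_PEEKUSR/PTRACE_POKEUSR accept.  For example,
 *
 *	pc = get_stack_long(child, offsetof(struct pt_regs, pc));
 *	put_stack_long(child, offsetof(struct pt_regs, pc), pc + 2);
 *
 * would read the tracee's program counter and advance it past one
 * 16-bit SH instruction.
 */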
void ptrace_triggered(struct perf_event *bp, int nmi,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct thread_struct *thread = &tsk->thread;
struct perf_event *bp;
struct perf_event_attr attr;
bp = thread->ptrace_bps[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->ptrace_bps[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
}
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (ptrace_get_breakpoints(child) < 0)
return;
set_single_step(child, pc);
ptrace_put_breakpoints(child);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret)
/* PC, PR, SR, GBR, MACH, MACL, TRA */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				   &regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_active(struct task_struct *target,
const struct user_regset *regset)
{
struct pt_regs *regs = task_pt_regs(target);
return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(pr),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(gbr),
REG_OFFSET_NAME(mach),
REG_OFFSET_NAME(macl),
REG_OFFSET_NAME(tra),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* R0 --> R15
* PC, PR, SR, GBR, MACH, MACL, TRA
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
#ifdef CONFIG_SH_DSP
[REGSET_DSP] = {
.n = sizeof(struct pt_dspregs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = dspregs_get,
.set = dspregs_set,
.active = dspregs_active,
},
#endif
};
static const struct user_regset_view user_sh_native_view = {
.name = "sh",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
} else {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
}
} else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
else if (addr == PT_DATA_ADDR)
tmp = child->mm->start_data;
else if (addr == PT_TEXT_END_ADDR)
tmp = child->mm->end_code;
else if (addr == PT_TEXT_LEN)
tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
[index >> 2] = data;
ret = 0;
} else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
static inline int audit_arch(void)
{
int arch = EM_SH;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->regs[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return ret ?: regs->regs[0];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
/*
* linux/fs/isofs/rock.c
*
* (C) 1992, 1993 Eric Youngdale
*
* Rock Ridge Extensions to iso9660
*/
#include <linux/slab.h>
#include <linux/pagemap.h>
#include "isofs.h"
#include "rock.h"
/*
* These functions are designed to read the system areas of a directory record
* and extract relevant information. There are different functions provided
* depending upon what information we need at the time. One function fills
* out an inode structure, a second one extracts a filename, a third one
* returns a symbolic link name, and a fourth one returns the extent number
* for the file.
*/
#define SIG(A,B) ((A) | ((B) << 8)) /* isonum_721() */
struct rock_state {
void *buffer;
unsigned char *chr;
int len;
int cont_size;
int cont_extent;
int cont_offset;
struct inode *inode;
};
/*
* This is a way of ensuring that we have something in the system
* use fields that is compatible with Rock Ridge. Return zero on success.
*/
static int check_sp(struct rock_ridge *rr, struct inode *inode)
{
if (rr->u.SP.magic[0] != 0xbe)
return -1;
if (rr->u.SP.magic[1] != 0xef)
return -1;
ISOFS_SB(inode->i_sb)->s_rock_offset = rr->u.SP.skip;
return 0;
}
static void setup_rock_ridge(struct iso_directory_record *de,
struct inode *inode, struct rock_state *rs)
{
rs->len = sizeof(struct iso_directory_record) + de->name_len[0];
if (rs->len & 1)
(rs->len)++;
rs->chr = (unsigned char *)de + rs->len;
rs->len = *((unsigned char *)de) - rs->len;
if (rs->len < 0)
rs->len = 0;
if (ISOFS_SB(inode->i_sb)->s_rock_offset != -1) {
rs->len -= ISOFS_SB(inode->i_sb)->s_rock_offset;
rs->chr += ISOFS_SB(inode->i_sb)->s_rock_offset;
if (rs->len < 0)
rs->len = 0;
}
}
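/*
 * Worked example (illustrative, assuming the usual 33-byte fixed part
 * of a directory record): for a record of total length 102 with a
 * 10-byte name, the header occupies 33 + 10 = 43 bytes, padded to 44
 * to keep the system-use area even-aligned.  rs->chr then points 44
 * bytes into the record and rs->len is 102 - 44 = 58 bytes of
 * system-use entries, further reduced by any SP "skip" offset recorded
 * in s_rock_offset.
 */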
static void init_rock_state(struct rock_state *rs, struct inode *inode)
{
memset(rs, 0, sizeof(*rs));
rs->inode = inode;
}
/*
* Returns 0 if the caller should continue scanning, 1 if the scan must end
* and -ve on error.
*/
static int rock_continue(struct rock_state *rs)
{
int ret = 1;
int blocksize = 1 << rs->inode->i_blkbits;
const int min_de_size = offsetof(struct rock_ridge, u);
kfree(rs->buffer);
rs->buffer = NULL;
if ((unsigned)rs->cont_offset > blocksize - min_de_size ||
(unsigned)rs->cont_size > blocksize ||
(unsigned)(rs->cont_offset + rs->cont_size) > blocksize) {
printk(KERN_NOTICE "rock: corrupted directory entry. "
"extent=%d, offset=%d, size=%d\n",
rs->cont_extent, rs->cont_offset, rs->cont_size);
ret = -EIO;
goto out;
}
if (rs->cont_extent) {
struct buffer_head *bh;
rs->buffer = kmalloc(rs->cont_size, GFP_KERNEL);
if (!rs->buffer) {
ret = -ENOMEM;
goto out;
}
ret = -EIO;
bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
if (bh) {
memcpy(rs->buffer, bh->b_data + rs->cont_offset,
rs->cont_size);
put_bh(bh);
rs->chr = rs->buffer;
rs->len = rs->cont_size;
rs->cont_extent = 0;
rs->cont_size = 0;
rs->cont_offset = 0;
return 0;
}
printk("Unable to read rock-ridge attributes\n");
}
out:
kfree(rs->buffer);
rs->buffer = NULL;
return ret;
}
/*
* We think there's a record of type `sig' at rs->chr. Parse the signature
* and make sure that there's really room for a record of that type.
*/
static int rock_check_overflow(struct rock_state *rs, int sig)
{
int len;
switch (sig) {
case SIG('S', 'P'):
len = sizeof(struct SU_SP_s);
break;
case SIG('C', 'E'):
len = sizeof(struct SU_CE_s);
break;
case SIG('E', 'R'):
len = sizeof(struct SU_ER_s);
break;
case SIG('R', 'R'):
len = sizeof(struct RR_RR_s);
break;
case SIG('P', 'X'):
len = sizeof(struct RR_PX_s);
break;
case SIG('P', 'N'):
len = sizeof(struct RR_PN_s);
break;
case SIG('S', 'L'):
len = sizeof(struct RR_SL_s);
break;
case SIG('N', 'M'):
len = sizeof(struct RR_NM_s);
break;
case SIG('C', 'L'):
len = sizeof(struct RR_CL_s);
break;
case SIG('P', 'L'):
len = sizeof(struct RR_PL_s);
break;
case SIG('T', 'F'):
len = sizeof(struct RR_TF_s);
break;
case SIG('Z', 'F'):
len = sizeof(struct RR_ZF_s);
break;
default:
len = 0;
break;
}
len += offsetof(struct rock_ridge, u);
if (len > rs->len) {
printk(KERN_NOTICE "rock: directory entry would overflow "
"storage\n");
printk(KERN_NOTICE "rock: sig=0x%02x, size=%d, remaining=%d\n",
sig, len, rs->len);
return -EIO;
}
return 0;
}
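/*
 * Worked example (illustrative): a rock_ridge header is four bytes
 * (two signature bytes, a length byte and a version byte), so an "NM"
 * entry needs offsetof(struct rock_ridge, u) + sizeof(struct RR_NM_s)
 * bytes of remaining space.  If rs->len has only 3 bytes left, this
 * function reports -EIO before rr->u.NM is ever dereferenced.
 */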
/*
* return length of name field; 0: not found, -1: to be ignored
*/
int get_rock_ridge_filename(struct iso_directory_record *de,
char *retname, struct inode *inode)
{
struct rock_state rs;
struct rock_ridge *rr;
int sig;
int retnamlen = 0;
int truncate = 0;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
*retname = 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_NM) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('N', 'M'):
if (truncate)
break;
if (rr->len < 5)
break;
/*
* If the flags are 2 or 4, this indicates '.' or '..'.
* We don't want to do anything with this, because it
* screws up the code that calls us. We don't really
* care anyways, since we can just use the non-RR
* name.
*/
if (rr->u.NM.flags & 6)
break;
if (rr->u.NM.flags & ~1) {
printk("Unsupported NM flag settings (%d)\n",
rr->u.NM.flags);
break;
}
if ((strlen(retname) + rr->len - 5) >= 254) {
truncate = 1;
break;
}
strncat(retname, rr->u.NM.name, rr->len - 5);
retnamlen += rr->len - 5;
break;
case SIG('R', 'E'):
kfree(rs.buffer);
return -1;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
return retnamlen; /* If 0, this file did not have a NM field */
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
#define RR_REGARD_XA 1
#define RR_RELOC_DE 2
static int
parse_rock_ridge_inode_internal(struct iso_directory_record *de,
struct inode *inode, int flags)
{
int symlink_len = 0;
int cnt, sig;
unsigned int reloc_block;
struct inode *reloc;
struct rock_ridge *rr;
int rootflag;
struct rock_state rs;
int ret = 0;
if (!ISOFS_SB(inode->i_sb)->s_rock)
return 0;
init_rock_state(&rs, inode);
setup_rock_ridge(de, inode, &rs);
if (flags & RR_REGARD_XA) {
rs.chr += 14;
rs.len -= 14;
if (rs.len < 0)
rs.len = 0;
}
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
/*
* Ignore rock ridge info if rr->len is out of range, but
* don't return -EIO because that would make the file
* invisible.
*/
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto eio;
rs.chr += rr->len;
rs.len -= rr->len;
/*
* As above, just ignore the rock ridge info if rr->len
* is bogus.
*/
if (rs.len < 0)
goto out; /* Something got screwed up here */
switch (sig) {
#ifndef CONFIG_ZISOFS /* No flag for SF or ZF */
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] &
(RR_PX | RR_TF | RR_SL | RR_CL)) == 0)
goto out;
break;
#endif
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('C', 'E'):
rs.cont_extent = isonum_733(rr->u.CE.extent);
rs.cont_offset = isonum_733(rr->u.CE.offset);
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
int p;
for (p = 0; p < rr->u.ER.len_id; p++)
printk("%c", rr->u.ER.data[p]);
}
printk("\n");
break;
case SIG('P', 'X'):
inode->i_mode = isonum_733(rr->u.PX.mode);
set_nlink(inode, isonum_733(rr->u.PX.n_links));
i_uid_write(inode, isonum_733(rr->u.PX.uid));
i_gid_write(inode, isonum_733(rr->u.PX.gid));
break;
case SIG('P', 'N'):
{
int high, low;
high = isonum_733(rr->u.PN.dev_high);
low = isonum_733(rr->u.PN.dev_low);
				/*
				 * The Rock Ridge standard specifies
				 * that if sizeof(dev_t) <= 4, then the
				 * high field is unused and the device
				 * number is stored entirely in the low
				 * field.  Some writers ignore this
				 * subtlety, so we test whether the
				 * entire device number looks like it
				 * was packed into the low field and,
				 * if so, use that.
				 */
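				/*
				 * Worked example (illustrative): a
				 * writer that packs the whole number
				 * into the low field might store
				 * high = 0, low = 0x0305; the test
				 * below then yields MKDEV(3, 5).  A
				 * conforming writer storing high = 3,
				 * low = 5 takes the else branch with
				 * the same result.
				 */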
if ((low & ~0xff) && high == 0) {
inode->i_rdev =
MKDEV(low >> 8, low & 0xff);
} else {
inode->i_rdev =
MKDEV(high, low);
}
}
break;
case SIG('T', 'F'):
/*
* Some RRIP writers incorrectly place ctime in the
* TF_CREATE field. Try to handle this correctly for
* either case.
*/
/* Rock ridge never appears on a High Sierra disk */
cnt = 0;
if (rr->u.TF.flags & TF_CREATE) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_MODIFY) {
inode->i_mtime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_mtime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ACCESS) {
inode->i_atime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_atime.tv_nsec = 0;
}
if (rr->u.TF.flags & TF_ATTRIBUTES) {
inode->i_ctime.tv_sec =
iso_date(rr->u.TF.times[cnt++].time,
0);
inode->i_ctime.tv_nsec = 0;
}
break;
case SIG('S', 'L'):
{
int slen;
struct SL_component *slp;
struct SL_component *oldslp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
inode->i_size = symlink_len;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
inode->i_size +=
slp->len;
break;
case 2:
inode->i_size += 1;
break;
case 4:
inode->i_size += 2;
break;
case 8:
rootflag = 1;
inode->i_size += 1;
break;
default:
printk("Symlink component flag "
"not implemented\n");
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)
(((char *)slp) + slp->len + 2);
if (slen < 2) {
if (((rr->u.SL.
flags & 1) != 0)
&&
((oldslp->
flags & 1) == 0))
inode->i_size +=
1;
break;
}
/*
* If this component record isn't
* continued, then append a '/'.
*/
if (!rootflag
&& (oldslp->flags & 1) == 0)
inode->i_size += 1;
}
}
symlink_len = inode->i_size;
break;
case SIG('R', 'E'):
printk(KERN_WARNING "Attempt to read inode for "
"relocated directory\n");
goto out;
case SIG('C', 'L'):
if (flags & RR_RELOC_DE) {
printk(KERN_ERR
"ISOFS: Recursive directory relocation "
"is not supported\n");
goto eio;
}
reloc_block = isonum_733(rr->u.CL.location);
if (reloc_block == ISOFS_I(inode)->i_iget5_block &&
ISOFS_I(inode)->i_iget5_offset == 0) {
printk(KERN_ERR
"ISOFS: Directory relocation points to "
"itself\n");
goto eio;
}
ISOFS_I(inode)->i_first_extent = reloc_block;
reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0);
if (IS_ERR(reloc)) {
ret = PTR_ERR(reloc);
goto out;
}
inode->i_mode = reloc->i_mode;
set_nlink(inode, reloc->i_nlink);
inode->i_uid = reloc->i_uid;
inode->i_gid = reloc->i_gid;
inode->i_rdev = reloc->i_rdev;
inode->i_size = reloc->i_size;
inode->i_blocks = reloc->i_blocks;
inode->i_atime = reloc->i_atime;
inode->i_ctime = reloc->i_ctime;
inode->i_mtime = reloc->i_mtime;
iput(reloc);
break;
#ifdef CONFIG_ZISOFS
case SIG('Z', 'F'): {
int algo;
if (ISOFS_SB(inode->i_sb)->s_nocompress)
break;
algo = isonum_721(rr->u.ZF.algorithm);
if (algo == SIG('p', 'z')) {
int block_shift =
isonum_711(&rr->u.ZF.parms[1]);
if (block_shift > 17) {
printk(KERN_WARNING "isofs: "
"Can't handle ZF block "
"size of 2^%d\n",
block_shift);
} else {
/*
* Note: we don't change
* i_blocks here
*/
ISOFS_I(inode)->i_file_format =
isofs_file_compressed;
/*
* Parameters to compression
* algorithm (header size,
* block size)
*/
ISOFS_I(inode)->i_format_parm[0] =
isonum_711(&rr->u.ZF.parms[0]);
ISOFS_I(inode)->i_format_parm[1] =
isonum_711(&rr->u.ZF.parms[1]);
inode->i_size =
isonum_733(rr->u.ZF.
real_size);
}
} else {
printk(KERN_WARNING
"isofs: Unknown ZF compression "
"algorithm: %c%c\n",
rr->u.ZF.algorithm[0],
rr->u.ZF.algorithm[1]);
}
break;
}
#endif
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret == 1)
ret = 0;
out:
kfree(rs.buffer);
return ret;
eio:
ret = -EIO;
goto out;
}
static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
{
int slen;
int rootflag;
struct SL_component *oldslp;
struct SL_component *slp;
slen = rr->len - 5;
slp = &rr->u.SL.link;
while (slen > 1) {
rootflag = 0;
switch (slp->flags & ~1) {
case 0:
if (slp->len > plimit - rpnt)
return NULL;
memcpy(rpnt, slp->text, slp->len);
rpnt += slp->len;
break;
case 2:
if (rpnt >= plimit)
return NULL;
*rpnt++ = '.';
break;
case 4:
if (2 > plimit - rpnt)
return NULL;
*rpnt++ = '.';
*rpnt++ = '.';
break;
case 8:
if (rpnt >= plimit)
return NULL;
rootflag = 1;
*rpnt++ = '/';
break;
default:
printk("Symlink component flag not implemented (%d)\n",
slp->flags);
}
slen -= slp->len + 2;
oldslp = slp;
slp = (struct SL_component *)((char *)slp + slp->len + 2);
if (slen < 2) {
/*
* If there is another SL record, and this component
* record isn't continued, then add a slash.
*/
if ((!rootflag) && (rr->u.SL.flags & 1) &&
!(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
break;
}
/*
* If this component record isn't continued, then append a '/'.
*/
if (!rootflag && !(oldslp->flags & 1)) {
if (rpnt >= plimit)
return NULL;
*rpnt++ = '/';
}
}
return rpnt;
}
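/*
 * Worked example (illustrative): per RRIP, component flag bit 0 means
 * "continued", value 2 means ".", 4 means "..", and 8 means the root.
 * An SL record holding the components {flags 8}, {flags 0, "usr"},
 * {flags 0, "lib"} is therefore reassembled by this function as
 * "/usr/lib", with a separating '/' appended after each non-continued,
 * non-root component.
 */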
int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode,
int relocated)
{
int flags = relocated ? RR_RELOC_DE : 0;
int result = parse_rock_ridge_inode_internal(de, inode, flags);
/*
* if rockridge flag was reset and we didn't look for attributes
* behind eventual XA attributes, have a look there
*/
if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1)
&& (ISOFS_SB(inode->i_sb)->s_rock == 2)) {
result = parse_rock_ridge_inode_internal(de, inode,
flags | RR_REGARD_XA);
}
return result;
}
/*
* readpage() for symlinks: reads symlink contents into the page and either
* makes it uptodate and returns 0 or returns error (-EIO)
*/
static int rock_ridge_symlink_readpage(struct file *file, struct page *page)
{
struct inode *inode = page->mapping->host;
struct iso_inode_info *ei = ISOFS_I(inode);
struct isofs_sb_info *sbi = ISOFS_SB(inode->i_sb);
char *link = kmap(page);
unsigned long bufsize = ISOFS_BUFFER_SIZE(inode);
struct buffer_head *bh;
char *rpnt = link;
unsigned char *pnt;
struct iso_directory_record *raw_de;
unsigned long block, offset;
int sig;
struct rock_ridge *rr;
struct rock_state rs;
int ret;
if (!sbi->s_rock)
goto error;
init_rock_state(&rs, inode);
block = ei->i_iget5_block;
bh = sb_bread(inode->i_sb, block);
if (!bh)
goto out_noread;
offset = ei->i_iget5_offset;
pnt = (unsigned char *)bh->b_data + offset;
raw_de = (struct iso_directory_record *)pnt;
/*
* If we go past the end of the buffer, there is some sort of error.
*/
if (offset + *pnt > bufsize)
goto out_bad_span;
/*
* Now test for possible Rock Ridge extensions which will override
* some of these numbers in the inode structure.
*/
setup_rock_ridge(raw_de, inode, &rs);
repeat:
while (rs.len > 2) { /* There may be one byte for padding somewhere */
rr = (struct rock_ridge *)rs.chr;
if (rr->len < 3)
goto out; /* Something got screwed up here */
sig = isonum_721(rs.chr);
if (rock_check_overflow(&rs, sig))
goto out;
rs.chr += rr->len;
rs.len -= rr->len;
if (rs.len < 0)
goto out; /* corrupted isofs */
switch (sig) {
case SIG('R', 'R'):
if ((rr->u.RR.flags[0] & RR_SL) == 0)
goto out;
break;
case SIG('S', 'P'):
if (check_sp(rr, inode))
goto out;
break;
case SIG('S', 'L'):
rpnt = get_symlink_chunk(rpnt, rr,
link + (PAGE_SIZE - 1));
if (rpnt == NULL)
goto out;
break;
case SIG('C', 'E'):
			/* This tells us whether there is a continuation record */
			rs.cont_extent = isonum_733(rr->u.CE.extent);
			rs.cont_offset = isonum_733(rr->u.CE.offset);
			rs.cont_size = isonum_733(rr->u.CE.size);
			break;
default:
break;
}
}
ret = rock_continue(&rs);
if (ret == 0)
goto repeat;
if (ret < 0)
goto fail;
if (rpnt == link)
goto fail;
brelse(bh);
*rpnt = '\0';
SetPageUptodate(page);
kunmap(page);
unlock_page(page);
return 0;
/* error exit from macro */
out:
kfree(rs.buffer);
goto fail;
out_noread:
printk("unable to read i-node block");
goto fail;
out_bad_span:
printk("symlink spans iso9660 blocks\n");
fail:
brelse(bh);
error:
SetPageError(page);
kunmap(page);
unlock_page(page);
return -EIO;
}
const struct address_space_operations isofs_symlink_aops = {
.readpage = rock_ridge_symlink_readpage
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
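/*
  Worked example (illustrative): for the 16-bit 5:6:5 value 0xF81F,
  C565_r = 31, C565_g = 0 and C565_b = 31.  Replicating the top bits
  into the low bits expands 31 to (31 << 3) | (31 >> 2) = 255, so the
  texel decodes to full magenta (255, 0, 255); the replication maps 0
  to 0 and each field's maximum to 255 exactly.
*/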
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
if (min > max) \
min = max; \
if (max - min < steps) \
max = MagickMin(min + steps, 255); \
if (max - min < steps) \
min = MagickMax(min - steps, 0)
#define Dot(left, right) (left.x*right.x) + (left.y*right.y) + (left.z*right.z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *),
WriteMipmaps(Image *,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
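/*
  Worked example of the mode rule above (illustrative only): with c0=0xF800
  (red) and c1=0x001F (blue), c0 > c1 selects four-color mode, so entries 2
  and 3 are the 2/3:1/3 and 1/3:2/3 blends of the two endpoints.  Swapping
  the endpoints gives c0 <= c1, selecting three-color mode: entry 2 becomes
  the midpoint and entry 3 fully transparent black.
*/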
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
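      /*
        The unsigned subtraction may wrap, but the square is computed
        modulo 2^N, so for byte-sized inputs the true squared distance
        (< 2^16) is recovered exactly.
      */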
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
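/*
  Cluster fit (in the style of the squish library): the texel colors are
  ordered along the principal axis, every contiguous partition into the four
  DXT1 palette weights (1, 2/3, 1/3, 0) is evaluated, and the least-error
  endpoint pair is kept.  Up to eight re-ordering iterations are attempted
  along the refined start-to-end axis.
*/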
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
static void CompressRangeFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
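  /*
    Eight power-method iterations: repeatedly applying the symmetric 3x3
    covariance matrix to v and rescaling by the largest component converges
    toward the dominant eigenvector, i.e. the principal axis of the color
    cloud.
  */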
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
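#ifdef DDS_EXAMPLE_USAGE
/*
  Illustrative only (not part of the coder): a minimal sketch of how the
  magick handler is exercised -- the first four bytes of a candidate file
  are tested against the "DDS " signature.  The buffer and function name
  here are assumptions for the example.
*/
static MagickBooleanType ExampleProbeDDS(void)
{
  static const unsigned char
    header[4] = { 'D', 'D', 'S', ' ' };

  return(IsDDS(header,sizeof(header)));
}
#endif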
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status,
cubemap = MagickFalse,
volume = MagickFalse,
matte;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue) {
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
matte = MagickTrue;
decoder = ReadUncompressedRGBA;
}
else
{
          matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
matte = MagickFalse;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
matte = MagickTrue;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
matte = MagickTrue;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/* Start a new image */
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->matte = matte;
image->compression = compression;
image->columns = dds_info.width;
image->rows = dds_info.height;
image->storage_class = DirectClass;
image->endian = LSBEndian;
image->depth = 8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((decoder)(image, &dds_info, exception) != MagickTrue)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
PixelPacket
*q;
register ssize_t
i,
x;
size_t
bits;
ssize_t
j,
y;
unsigned char
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
if (colors.a[code] && image->matte == MagickFalse)
/* Correct matte */
image->matte = MagickTrue;
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
MagickSizeType
alpha_bits;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
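      /*
        DXT5 alpha: a0, a1 and sixteen 3-bit codes.  Codes 0/1 select a0/a1
        directly; when a0 > a1 the remaining codes interpolate in sevenths,
        otherwise they interpolate in fifths with codes 6 and 7 meaning
        fully transparent (0) and fully opaque (255).
      */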
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
x, y;
unsigned short
color;
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)));
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
SetPixelAlpha(q,QuantumRange);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleMatteType);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else if (alphaBits == 2)
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(color >> 8)));
SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
}
else
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)));
}
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
register ssize_t
i;
MagickOffsetType
offset;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) w * h * pixel_size;
(void) SeekBlob(image, offset, SEEK_CUR);
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
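  /*
    Pack the sixteen 3-bit alpha indices into six bytes: each group of
    eight indices forms a 24-bit run, emitted little-endian.
  */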
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
static void WriteCompressed(Image *image, const size_t count,
DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if (clusterFit == MagickFalse || count == 0)
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file.  DXT5 is the
% default; DXT1 is used for images without a matte channel (or when
% explicitly requested), and the "dds:compression" option can select
% uncompressed RGB(A) output.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (!image->matte)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
weightByAlpha=MagickTrue;
}
}
}
maxMipmaps=SIZE_MAX;
mipmaps=0;
if ((image->columns & (image->columns - 1)) == 0 &&
(image->rows & (image->rows - 1)) == 0)
{
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while (columns != 1 && rows != 1 && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
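  /*
    For example, a 256x128 image yields mipmaps=7 (128x64 down to 2x1);
    the loop above stops once either dimension reaches 1.
  */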
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
&image->exception);
if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
clusterFit,weightByAlpha,&image->exception) == MagickFalse)
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT | DDSD_LINEARSIZE);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 8));
else
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 16));
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) ResetMagickMemory(software,0,sizeof(software));
(void) strcpy(software,"IMAGEMAGICK");
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
    for (i=0; i < 5; i++)  /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->matte)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
  for (i=0; i < 4; i++)  /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression, const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value,
  const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);

  /*
    Clamp before converting to size_t: a negative intermediate would
    otherwise wrap to a huge unsigned value and defeat the limit check.
  */
  if (result < 0)
    return(0);
  if ((size_t) result > limit)
    return(limit);
  return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
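/*
  For example, saturated red (1.0,0.0,0.0) maps to r=31,g=0,b=0, i.e. the
  16-bit RGB565 value 0xF800; saturated green maps to 0x07E0.
*/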
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char* indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
Image*
resize_image;
register ssize_t
i;
size_t
columns,
rows;
columns = image->columns;
rows = image->rows;
for (i=0; i< (ssize_t) mipmaps; i++)
{
resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter,1.0,
exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
DestroyBlob(resize_image);
resize_image->blob=ReferenceBlob(image->blob);
    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
resize_image=DestroyImage(resize_image);
columns = DIV2(columns);
rows = DIV2(rows);
}
return(MagickTrue);
}
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
const ssize_t* map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
if (image->matte)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p)));
p++;
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_1856_0 |
crossvul-cpp_data_bad_4984_1 | /*
* linux/fs/pipe.c
*
* Copyright (C) 1991, 1992, 1999 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/log2.h>
#include <linux/mount.h>
#include <linux/magic.h>
#include <linux/pipe_fs_i.h>
#include <linux/uio.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/audit.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include "internal.h"
/*
* The max size that a non-root user is allowed to grow the pipe. Can
* be set by root in /proc/sys/fs/pipe-max-size
*/
unsigned int pipe_max_size = 1048576;
/*
* Minimum pipe size, as required by POSIX
*/
unsigned int pipe_min_size = PAGE_SIZE;
/*
* We use a start+len construction, which provides full use of the
* allocated memory.
* -- Florian Coosmann (FGC)
*
* Reads with count = 0 should always return 0.
* -- Julian Bradfield 1999-06-07.
*
* FIFOs and Pipes now generate SIGIO for both readers and writers.
* -- Jeremy Elson <jelson@circlemud.org> 2001-08-16
*
* pipe_read & write cleanup
* -- Manfred Spraul <manfred@colorfullife.com> 2002-05-09
*/
static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
{
if (pipe->files)
mutex_lock_nested(&pipe->mutex, subclass);
}
void pipe_lock(struct pipe_inode_info *pipe)
{
/*
* pipe_lock() nests non-pipe inode locks (for writing to a file)
*/
pipe_lock_nested(pipe, I_MUTEX_PARENT);
}
EXPORT_SYMBOL(pipe_lock);
void pipe_unlock(struct pipe_inode_info *pipe)
{
if (pipe->files)
mutex_unlock(&pipe->mutex);
}
EXPORT_SYMBOL(pipe_unlock);
static inline void __pipe_lock(struct pipe_inode_info *pipe)
{
mutex_lock_nested(&pipe->mutex, I_MUTEX_PARENT);
}
static inline void __pipe_unlock(struct pipe_inode_info *pipe)
{
mutex_unlock(&pipe->mutex);
}
void pipe_double_lock(struct pipe_inode_info *pipe1,
struct pipe_inode_info *pipe2)
{
BUG_ON(pipe1 == pipe2);
if (pipe1 < pipe2) {
pipe_lock_nested(pipe1, I_MUTEX_PARENT);
pipe_lock_nested(pipe2, I_MUTEX_CHILD);
} else {
pipe_lock_nested(pipe2, I_MUTEX_PARENT);
pipe_lock_nested(pipe1, I_MUTEX_CHILD);
}
}
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe)
{
DEFINE_WAIT(wait);
/*
* Pipes are system-local resources, so sleeping on them
* is considered a noninteractive wait:
*/
prepare_to_wait(&pipe->wait, &wait, TASK_INTERRUPTIBLE);
pipe_unlock(pipe);
schedule();
finish_wait(&pipe->wait, &wait);
pipe_lock(pipe);
}
static void anon_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* If nobody else uses this page, and we don't already have a
* temporary page, let's keep track of it as a one-deep
* allocation cache. (Otherwise just release our reference to it)
*/
if (page_count(page) == 1 && !pipe->tmp_page)
pipe->tmp_page = page;
else
page_cache_release(page);
}
/**
* generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to attempt to steal
*
* Description:
* This function attempts to steal the &struct page attached to
* @buf. If successful, this function returns 0 and returns with
* the page locked. The caller may then reuse the page for whatever
* he wishes; the typical use is insertion into a different file
* page cache.
*/
int generic_pipe_buf_steal(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
struct page *page = buf->page;
/*
* A reference of one is golden, that means that the owner of this
* page is the only one holding a reference to it. lock the page
* and return OK.
*/
if (page_count(page) == 1) {
lock_page(page);
return 0;
}
return 1;
}
EXPORT_SYMBOL(generic_pipe_buf_steal);
/**
* generic_pipe_buf_get - get a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
*
* Description:
 * This function grabs an extra reference to @buf. It's used in
 * the tee() system call, when we duplicate the buffers in one
* pipe into another.
*/
void generic_pipe_buf_get(struct pipe_inode_info *pipe, struct pipe_buffer *buf)
{
page_cache_get(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_get);
/**
* generic_pipe_buf_confirm - verify contents of the pipe buffer
* @info: the pipe that the buffer belongs to
* @buf: the buffer to confirm
*
* Description:
* This function does nothing, because the generic pipe code uses
* pages that are always good when inserted into the pipe.
*/
int generic_pipe_buf_confirm(struct pipe_inode_info *info,
struct pipe_buffer *buf)
{
return 0;
}
EXPORT_SYMBOL(generic_pipe_buf_confirm);
/**
* generic_pipe_buf_release - put a reference to a &struct pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to put a reference to
*
* Description:
* This function releases a reference to @buf.
*/
void generic_pipe_buf_release(struct pipe_inode_info *pipe,
struct pipe_buffer *buf)
{
page_cache_release(buf->page);
}
EXPORT_SYMBOL(generic_pipe_buf_release);
static const struct pipe_buf_operations anon_pipe_buf_ops = {
.can_merge = 1,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
static const struct pipe_buf_operations packet_pipe_buf_ops = {
.can_merge = 0,
.confirm = generic_pipe_buf_confirm,
.release = anon_pipe_buf_release,
.steal = generic_pipe_buf_steal,
.get = generic_pipe_buf_get,
};
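/*
 * The two operation tables differ only in ->can_merge: ordinary pipe
 * writes may be appended to a partially filled buffer, while packetized
 * (O_DIRECT) pipes must preserve write boundaries, so their buffers are
 * never merged.
 */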
static ssize_t
pipe_read(struct kiocb *iocb, struct iov_iter *to)
{
size_t total_len = iov_iter_count(to);
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
int do_wakeup;
ssize_t ret;
/* Null read succeeds. */
if (unlikely(total_len == 0))
return 0;
do_wakeup = 0;
ret = 0;
__pipe_lock(pipe);
for (;;) {
int bufs = pipe->nrbufs;
if (bufs) {
int curbuf = pipe->curbuf;
struct pipe_buffer *buf = pipe->bufs + curbuf;
const struct pipe_buf_operations *ops = buf->ops;
size_t chars = buf->len;
size_t written;
int error;
if (chars > total_len)
chars = total_len;
error = ops->confirm(pipe, buf);
if (error) {
if (!ret)
ret = error;
break;
}
written = copy_page_to_iter(buf->page, buf->offset, chars, to);
if (unlikely(written < chars)) {
if (!ret)
ret = -EFAULT;
break;
}
ret += chars;
buf->offset += chars;
buf->len -= chars;
/* Was it a packet buffer? Clean up and exit */
if (buf->flags & PIPE_BUF_FLAG_PACKET) {
total_len = chars;
buf->len = 0;
}
if (!buf->len) {
buf->ops = NULL;
ops->release(pipe, buf);
curbuf = (curbuf + 1) & (pipe->buffers - 1);
pipe->curbuf = curbuf;
pipe->nrbufs = --bufs;
do_wakeup = 1;
}
total_len -= chars;
if (!total_len)
break; /* common path: read succeeded */
}
if (bufs) /* More to do? */
continue;
if (!pipe->writers)
break;
if (!pipe->waiting_writers) {
/* syscall merging: Usually we must not sleep
* if O_NONBLOCK is set, or if we got some data.
* But if a writer sleeps in kernel space, then
* we can wait for that data without violating POSIX.
*/
if (ret)
break;
if (filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
pipe_wait(pipe);
}
__pipe_unlock(pipe);
/* Signal writers asynchronously that there is more room. */
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLOUT | POLLWRNORM);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
if (ret > 0)
file_accessed(filp);
return ret;
}
static inline int is_packetized(struct file *file)
{
return (file->f_flags & O_DIRECT) != 0;
}
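/*
 * Illustrative note: packet mode is selected from userspace at creation
 * time, e.g. pipe2(fds, O_DIRECT); each write() then produces one packet
 * and each read() consumes at most one (see the PIPE_BUF_FLAG_PACKET
 * handling in pipe_read() above).
 */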
static ssize_t
pipe_write(struct kiocb *iocb, struct iov_iter *from)
{
struct file *filp = iocb->ki_filp;
struct pipe_inode_info *pipe = filp->private_data;
ssize_t ret = 0;
int do_wakeup = 0;
size_t total_len = iov_iter_count(from);
ssize_t chars;
/* Null write succeeds. */
if (unlikely(total_len == 0))
return 0;
__pipe_lock(pipe);
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
ret = -EPIPE;
goto out;
}
/* We try to merge small writes */
chars = total_len & (PAGE_SIZE-1); /* size of the last buffer */
if (pipe->nrbufs && chars != 0) {
int lastbuf = (pipe->curbuf + pipe->nrbufs - 1) &
(pipe->buffers - 1);
struct pipe_buffer *buf = pipe->bufs + lastbuf;
const struct pipe_buf_operations *ops = buf->ops;
int offset = buf->offset + buf->len;
if (ops->can_merge && offset + chars <= PAGE_SIZE) {
ret = ops->confirm(pipe, buf);
if (ret)
goto out;
ret = copy_page_from_iter(buf->page, offset, chars, from);
if (unlikely(ret < chars)) {
ret = -EFAULT;
goto out;
}
do_wakeup = 1;
buf->len += ret;
if (!iov_iter_count(from))
goto out;
}
}
for (;;) {
int bufs;
if (!pipe->readers) {
send_sig(SIGPIPE, current, 0);
if (!ret)
ret = -EPIPE;
break;
}
bufs = pipe->nrbufs;
if (bufs < pipe->buffers) {
int newbuf = (pipe->curbuf + bufs) & (pipe->buffers-1);
struct pipe_buffer *buf = pipe->bufs + newbuf;
struct page *page = pipe->tmp_page;
int copied;
if (!page) {
page = alloc_page(GFP_HIGHUSER);
if (unlikely(!page)) {
ret = ret ? : -ENOMEM;
break;
}
pipe->tmp_page = page;
}
/* Always wake up, even if the copy fails. Otherwise
* we lock up (O_NONBLOCK-)readers that sleep due to
* syscall merging.
* FIXME! Is this really true?
*/
do_wakeup = 1;
copied = copy_page_from_iter(page, 0, PAGE_SIZE, from);
if (unlikely(copied < PAGE_SIZE && iov_iter_count(from))) {
if (!ret)
ret = -EFAULT;
break;
}
ret += copied;
/* Insert it into the buffer array */
buf->page = page;
buf->ops = &anon_pipe_buf_ops;
buf->offset = 0;
buf->len = copied;
buf->flags = 0;
if (is_packetized(filp)) {
buf->ops = &packet_pipe_buf_ops;
buf->flags = PIPE_BUF_FLAG_PACKET;
}
pipe->nrbufs = ++bufs;
pipe->tmp_page = NULL;
if (!iov_iter_count(from))
break;
}
if (bufs < pipe->buffers)
continue;
if (filp->f_flags & O_NONBLOCK) {
if (!ret)
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
if (!ret)
ret = -ERESTARTSYS;
break;
}
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
do_wakeup = 0;
}
pipe->waiting_writers++;
pipe_wait(pipe);
pipe->waiting_writers--;
}
out:
__pipe_unlock(pipe);
if (do_wakeup) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
}
if (ret > 0 && sb_start_write_trylock(file_inode(filp)->i_sb)) {
int err = file_update_time(filp);
if (err)
ret = err;
sb_end_write(file_inode(filp)->i_sb);
}
return ret;
}
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe = filp->private_data;
int count, buf, nrbufs;
switch (cmd) {
case FIONREAD:
__pipe_lock(pipe);
count = 0;
buf = pipe->curbuf;
nrbufs = pipe->nrbufs;
while (--nrbufs >= 0) {
count += pipe->bufs[buf].len;
buf = (buf+1) & (pipe->buffers - 1);
}
__pipe_unlock(pipe);
return put_user(count, (int __user *)arg);
default:
return -ENOIOCTLCMD;
}
}
/* No kernel lock held - fine */
static unsigned int
pipe_poll(struct file *filp, poll_table *wait)
{
unsigned int mask;
struct pipe_inode_info *pipe = filp->private_data;
int nrbufs;
poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
nrbufs = pipe->nrbufs;
mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
mask |= POLLHUP;
}
if (filp->f_mode & FMODE_WRITE) {
mask |= (nrbufs < pipe->buffers) ? POLLOUT | POLLWRNORM : 0;
/*
* Most Unices do not set POLLERR for FIFOs but on Linux they
* behave exactly like pipes for poll().
*/
if (!pipe->readers)
mask |= POLLERR;
}
return mask;
}
static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
{
int kill = 0;
spin_lock(&inode->i_lock);
if (!--pipe->files) {
inode->i_pipe = NULL;
kill = 1;
}
spin_unlock(&inode->i_lock);
if (kill)
free_pipe_info(pipe);
}
static int
pipe_release(struct inode *inode, struct file *file)
{
struct pipe_inode_info *pipe = file->private_data;
__pipe_lock(pipe);
if (file->f_mode & FMODE_READ)
pipe->readers--;
if (file->f_mode & FMODE_WRITE)
pipe->writers--;
if (pipe->readers || pipe->writers) {
wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return 0;
}
static int
pipe_fasync(int fd, struct file *filp, int on)
{
struct pipe_inode_info *pipe = filp->private_data;
int retval = 0;
__pipe_lock(pipe);
if (filp->f_mode & FMODE_READ)
retval = fasync_helper(fd, filp, on, &pipe->fasync_readers);
if ((filp->f_mode & FMODE_WRITE) && retval >= 0) {
retval = fasync_helper(fd, filp, on, &pipe->fasync_writers);
if (retval < 0 && (filp->f_mode & FMODE_READ))
/* this can happen only if on == T */
fasync_helper(-1, filp, 0, &pipe->fasync_readers);
}
__pipe_unlock(pipe);
return retval;
}
struct pipe_inode_info *alloc_pipe_info(void)
{
struct pipe_inode_info *pipe;
pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL);
if (pipe) {
pipe->bufs = kzalloc(sizeof(struct pipe_buffer) * PIPE_DEF_BUFFERS, GFP_KERNEL);
if (pipe->bufs) {
init_waitqueue_head(&pipe->wait);
pipe->r_counter = pipe->w_counter = 1;
pipe->buffers = PIPE_DEF_BUFFERS;
mutex_init(&pipe->mutex);
return pipe;
}
kfree(pipe);
}
return NULL;
}
void free_pipe_info(struct pipe_inode_info *pipe)
{
int i;
for (i = 0; i < pipe->buffers; i++) {
struct pipe_buffer *buf = pipe->bufs + i;
if (buf->ops)
buf->ops->release(pipe, buf);
}
if (pipe->tmp_page)
__free_page(pipe->tmp_page);
kfree(pipe->bufs);
kfree(pipe);
}
static struct vfsmount *pipe_mnt __read_mostly;
/*
* pipefs_dname() is called from d_path().
*/
static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
d_inode(dentry)->i_ino);
}
static const struct dentry_operations pipefs_dentry_operations = {
.d_dname = pipefs_dname,
};
static struct inode * get_pipe_inode(void)
{
struct inode *inode = new_inode_pseudo(pipe_mnt->mnt_sb);
struct pipe_inode_info *pipe;
if (!inode)
goto fail_inode;
inode->i_ino = get_next_ino();
pipe = alloc_pipe_info();
if (!pipe)
goto fail_iput;
inode->i_pipe = pipe;
pipe->files = 2;
pipe->readers = pipe->writers = 1;
inode->i_fop = &pipefifo_fops;
/*
* Mark the inode dirty from the very beginning,
* that way it will never be moved to the dirty
* list because "mark_inode_dirty()" will think
* that it already _is_ on the dirty list.
*/
inode->i_state = I_DIRTY;
inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
return inode;
fail_iput:
iput(inode);
fail_inode:
return NULL;
}
int create_pipe_files(struct file **res, int flags)
{
int err;
struct inode *inode = get_pipe_inode();
struct file *f;
struct path path;
static struct qstr name = { .name = "" };
if (!inode)
return -ENFILE;
err = -ENOMEM;
path.dentry = d_alloc_pseudo(pipe_mnt->mnt_sb, &name);
if (!path.dentry)
goto err_inode;
path.mnt = mntget(pipe_mnt);
d_instantiate(path.dentry, inode);
f = alloc_file(&path, FMODE_WRITE, &pipefifo_fops);
if (IS_ERR(f)) {
err = PTR_ERR(f);
goto err_dentry;
}
f->f_flags = O_WRONLY | (flags & (O_NONBLOCK | O_DIRECT));
f->private_data = inode->i_pipe;
res[0] = alloc_file(&path, FMODE_READ, &pipefifo_fops);
if (IS_ERR(res[0])) {
err = PTR_ERR(res[0]);
goto err_file;
}
path_get(&path);
res[0]->private_data = inode->i_pipe;
res[0]->f_flags = O_RDONLY | (flags & O_NONBLOCK);
res[1] = f;
return 0;
err_file:
put_filp(f);
err_dentry:
free_pipe_info(inode->i_pipe);
path_put(&path);
return err;
err_inode:
free_pipe_info(inode->i_pipe);
iput(inode);
return err;
}
static int __do_pipe_flags(int *fd, struct file **files, int flags)
{
int error;
int fdw, fdr;
if (flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT))
return -EINVAL;
error = create_pipe_files(files, flags);
if (error)
return error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_read_pipe;
fdr = error;
error = get_unused_fd_flags(flags);
if (error < 0)
goto err_fdr;
fdw = error;
audit_fd_pair(fdr, fdw);
fd[0] = fdr;
fd[1] = fdw;
return 0;
err_fdr:
put_unused_fd(fdr);
err_read_pipe:
fput(files[0]);
fput(files[1]);
return error;
}
int do_pipe_flags(int *fd, int flags)
{
struct file *files[2];
int error = __do_pipe_flags(fd, files, flags);
if (!error) {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
return error;
}
/*
* sys_pipe() is the normal C calling standard for creating
* a pipe. It's not the way Unix traditionally does this, though.
*/
SYSCALL_DEFINE2(pipe2, int __user *, fildes, int, flags)
{
struct file *files[2];
int fd[2];
int error;
error = __do_pipe_flags(fd, files, flags);
if (!error) {
if (unlikely(copy_to_user(fildes, fd, sizeof(fd)))) {
fput(files[0]);
fput(files[1]);
put_unused_fd(fd[0]);
put_unused_fd(fd[1]);
error = -EFAULT;
} else {
fd_install(fd[0], files[0]);
fd_install(fd[1], files[1]);
}
}
return error;
}
SYSCALL_DEFINE1(pipe, int __user *, fildes)
{
return sys_pipe2(fildes, 0);
}
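/*
* Illustration (not part of the original source): typical userspace use
* of pipe2(), assuming the glibc wrapper with _GNU_SOURCE defined:
*
*	int fds[2];
*	if (pipe2(fds, O_CLOEXEC | O_NONBLOCK) < 0)
*		perror("pipe2");
*	// fds[0] is the read end and fds[1] the write end, matching
*	// the fd[0]/fd[1] ordering established in __do_pipe_flags().
*/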
static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
{
int cur = *cnt;
while (cur == *cnt) {
pipe_wait(pipe);
if (signal_pending(current))
break;
}
return cur == *cnt ? -ERESTARTSYS : 0;
}
static void wake_up_partner(struct pipe_inode_info *pipe)
{
wake_up_interruptible(&pipe->wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
{
struct pipe_inode_info *pipe;
bool is_pipe = inode->i_sb->s_magic == PIPEFS_MAGIC;
int ret;
filp->f_version = 0;
spin_lock(&inode->i_lock);
if (inode->i_pipe) {
pipe = inode->i_pipe;
pipe->files++;
spin_unlock(&inode->i_lock);
} else {
spin_unlock(&inode->i_lock);
pipe = alloc_pipe_info();
if (!pipe)
return -ENOMEM;
pipe->files = 1;
spin_lock(&inode->i_lock);
if (unlikely(inode->i_pipe)) {
inode->i_pipe->files++;
spin_unlock(&inode->i_lock);
free_pipe_info(pipe);
pipe = inode->i_pipe;
} else {
inode->i_pipe = pipe;
spin_unlock(&inode->i_lock);
}
}
filp->private_data = pipe;
/* OK, we have a pipe and it's pinned down */
__pipe_lock(pipe);
/* We can only do regular read/write on fifos */
filp->f_mode &= (FMODE_READ | FMODE_WRITE);
switch (filp->f_mode) {
case FMODE_READ:
/*
* O_RDONLY
* POSIX.1 says that O_NONBLOCK means return with the FIFO
* opened, even when there is no process writing the FIFO.
*/
pipe->r_counter++;
if (pipe->readers++ == 0)
wake_up_partner(pipe);
if (!is_pipe && !pipe->writers) {
if ((filp->f_flags & O_NONBLOCK)) {
/* suppress POLLHUP until we have
* seen a writer */
filp->f_version = pipe->w_counter;
} else {
if (wait_for_partner(pipe, &pipe->w_counter))
goto err_rd;
}
}
break;
case FMODE_WRITE:
/*
* O_WRONLY
* POSIX.1 says that O_NONBLOCK means return -1 with
* errno=ENXIO when there is no process reading the FIFO.
*/
ret = -ENXIO;
if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
goto err;
pipe->w_counter++;
if (!pipe->writers++)
wake_up_partner(pipe);
if (!is_pipe && !pipe->readers) {
if (wait_for_partner(pipe, &pipe->r_counter))
goto err_wr;
}
break;
case FMODE_READ | FMODE_WRITE:
/*
* O_RDWR
* POSIX.1 leaves this case "undefined" when O_NONBLOCK is set.
* This implementation will NEVER block on a O_RDWR open, since
* the process can at least talk to itself.
*/
pipe->readers++;
pipe->writers++;
pipe->r_counter++;
pipe->w_counter++;
if (pipe->readers == 1 || pipe->writers == 1)
wake_up_partner(pipe);
break;
default:
ret = -EINVAL;
goto err;
}
/* Ok! */
__pipe_unlock(pipe);
return 0;
err_rd:
if (!--pipe->readers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err_wr:
if (!--pipe->writers)
wake_up_interruptible(&pipe->wait);
ret = -ERESTARTSYS;
goto err;
err:
__pipe_unlock(pipe);
put_pipe_info(inode, pipe);
return ret;
}
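/*
* Illustration (not part of the original source): the O_NONBLOCK rules
* implemented above, as seen from userspace on a FIFO with no peer yet:
*
*	open("fifo", O_WRONLY | O_NONBLOCK);	// no reader: fails, errno == ENXIO
*	open("fifo", O_RDONLY | O_NONBLOCK);	// no writer: still returns at once
*	open("fifo", O_RDWR);			// never blocks, even with no peer
*/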
const struct file_operations pipefifo_fops = {
.open = fifo_open,
.llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
.poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
};
/*
* Allocate a new array of pipe buffers and copy the info over. Returns the
* pipe size on success, or a negative error code on failure.
*/
static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
{
struct pipe_buffer *bufs;
/*
* We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
* expect a lot of shrink+grow operations, just free and allocate
* again like we would do for growing. If the pipe currently
* contains more buffers than arg, then return busy.
*/
if (nr_pages < pipe->nrbufs)
return -EBUSY;
bufs = kcalloc(nr_pages, sizeof(*bufs), GFP_KERNEL | __GFP_NOWARN);
if (unlikely(!bufs))
return -ENOMEM;
/*
* The pipe array wraps around, so just start the new one at zero
* and adjust the indexes.
*/
if (pipe->nrbufs) {
unsigned int tail;
unsigned int head;
tail = pipe->curbuf + pipe->nrbufs;
if (tail < pipe->buffers)
tail = 0;
else
tail &= (pipe->buffers - 1);
head = pipe->nrbufs - tail;
if (head)
memcpy(bufs, pipe->bufs + pipe->curbuf, head * sizeof(struct pipe_buffer));
if (tail)
memcpy(bufs + head, pipe->bufs, tail * sizeof(struct pipe_buffer));
}
pipe->curbuf = 0;
kfree(pipe->bufs);
pipe->bufs = bufs;
pipe->buffers = nr_pages;
return nr_pages * PAGE_SIZE;
}
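/*
* Worked example for the wrap-around copy above (illustrative): with
* pipe->buffers == 8, pipe->curbuf == 6 and pipe->nrbufs == 4, the live
* buffers occupy slots 6, 7, 0, 1. Then tail = (6 + 4) & 7 == 2 and
* head = 4 - 2 == 2, so slots 6..7 are copied first and slots 0..1
* follow, leaving the new array contiguous from index 0.
*/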
/*
* Currently we rely on the pipe array holding a power-of-2 number
* of pages.
*/
static inline unsigned int round_pipe_size(unsigned int size)
{
unsigned long nr_pages;
nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
}
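/*
* Worked example (illustrative, assuming PAGE_SIZE == 4096): a request
* for 20000 bytes spans 5 pages, which roundup_pow_of_two() bumps to 8,
* so the pipe is sized to 8 << PAGE_SHIFT == 32768 bytes.
*/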
/*
* This should work even if CONFIG_PROC_FS isn't set, as proc_dointvec_minmax
* will return an error.
*/
int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
size_t *lenp, loff_t *ppos)
{
int ret;
ret = proc_dointvec_minmax(table, write, buf, lenp, ppos);
if (ret < 0 || !write)
return ret;
pipe_max_size = round_pipe_size(pipe_max_size);
return ret;
}
/*
* After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
* location, so checking ->i_pipe is not enough to verify that this is a
* pipe.
*/
struct pipe_inode_info *get_pipe_info(struct file *file)
{
return file->f_op == &pipefifo_fops ? file->private_data : NULL;
}
long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct pipe_inode_info *pipe;
long ret;
pipe = get_pipe_info(file);
if (!pipe)
return -EBADF;
__pipe_lock(pipe);
switch (cmd) {
case F_SETPIPE_SZ: {
unsigned int size, nr_pages;
size = round_pipe_size(arg);
nr_pages = size >> PAGE_SHIFT;
ret = -EINVAL;
if (!nr_pages)
goto out;
if (!capable(CAP_SYS_RESOURCE) && size > pipe_max_size) {
ret = -EPERM;
goto out;
}
ret = pipe_set_size(pipe, nr_pages);
break;
}
case F_GETPIPE_SZ:
ret = pipe->buffers * PAGE_SIZE;
break;
default:
ret = -EINVAL;
break;
}
out:
__pipe_unlock(pipe);
return ret;
}
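/*
* Illustration (not part of the original source): resizing a pipe from
* userspace, assuming _GNU_SOURCE for F_SETPIPE_SZ/F_GETPIPE_SZ:
*
*	if (fcntl(pipefd[1], F_SETPIPE_SZ, 1 << 20) < 0)
*		perror("F_SETPIPE_SZ");	// EPERM above pipe_max_size
*					// without CAP_SYS_RESOURCE
*	long size = fcntl(pipefd[1], F_GETPIPE_SZ);
*/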
static const struct super_operations pipefs_ops = {
.destroy_inode = free_inode_nonrcu,
.statfs = simple_statfs,
};
/*
* pipefs should _never_ be mounted by userland - too much of a security hassle,
* no real gain from having the whole whorehouse mounted. So we don't need
* any operations on the root directory. However, we need a non-trivial
* d_name - pipe: will go nicely and kill the special-casing in procfs.
*/
static struct dentry *pipefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_pseudo(fs_type, "pipe:", &pipefs_ops,
&pipefs_dentry_operations, PIPEFS_MAGIC);
}
static struct file_system_type pipe_fs_type = {
.name = "pipefs",
.mount = pipefs_mount,
.kill_sb = kill_anon_super,
};
static int __init init_pipe_fs(void)
{
int err = register_filesystem(&pipe_fs_type);
if (!err) {
pipe_mnt = kern_mount(&pipe_fs_type);
if (IS_ERR(pipe_mnt)) {
err = PTR_ERR(pipe_mnt);
unregister_filesystem(&pipe_fs_type);
}
}
return err;
}
fs_initcall(init_pipe_fs);
/* SCTP kernel implementation
* (C) Copyright IBM Corp. 2001, 2004
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2001-2002 Intel Corp.
* Copyright (c) 2002 Nokia Corp.
*
* This is part of the SCTP Linux Kernel Implementation.
*
* These are the state functions for the state machine.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Mathew Kotowsky <kotowsky@sctp.org>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
* Hui Huang <hui.huang@nokia.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/inet_ecn.h>
#include <linux/skbuff.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/structs.h>
static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen);
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands);
static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk);
static void sctp_send_stale_cookie_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk);
static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk);
static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport);
static sctp_disposition_t sctp_sf_abort_violation(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen);
static sctp_disposition_t sctp_sf_violation_chunklen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_paramlen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_ctsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
static sctp_ierror_t sctp_sf_authenticate(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands);
/* Small helper function that checks whether the chunk length
* is at least the required length. The 'required_length' argument
* is set to the size of the specific chunk type we are testing.
* Return Values: 1 = Valid length
* 0 = Invalid length
*
*/
static inline int
sctp_chunk_length_valid(struct sctp_chunk *chunk,
__u16 required_length)
{
__u16 chunk_length = ntohs(chunk->chunk_hdr->length);
if (unlikely(chunk_length < required_length))
return 0;
return 1;
}
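/* Usage sketch (illustrative): callers pass the minimum size the chunk
* type requires, e.g.
*
*	if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
*		return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
*
* which rejects any chunk whose declared length cannot even hold the
* fixed header of that chunk type.
*/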
/**********************************************************
* These are the state functions for handling chunk events.
**********************************************************/
/*
* Process the final SHUTDOWN COMPLETE.
*
* Section: 4 (C) (diagram), 9.2
* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify
* that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be
* discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint
* should stop the T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED state).
*
* Verification Tag: 8.5.1(C), sctpimpguide 2.41.
* C) Rules for packet carrying SHUTDOWN COMPLETE:
* ...
* - The receiver of a SHUTDOWN COMPLETE shall accept the packet
* if the Verification Tag field of the packet matches its own tag and
* the T bit is not set
* OR
* it is set to its peer's tag and the T bit is set in the Chunk
* Flags.
* Otherwise, the receiver MUST silently discard the packet
* and take no further action. An endpoint MUST ignore the
* SHUTDOWN COMPLETE if it is not in the SHUTDOWN-ACK-SENT state.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_4_C(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* RFC 2960 6.10 Bundling
*
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_COMPLETE chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* RFC 2960 10.2 SCTP-to-ULP
*
* H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
/* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint
* will verify that it is in SHUTDOWN-ACK-SENT state, if it is
* not the chunk should be discarded. If the endpoint is in
* the SHUTDOWN-ACK-SENT state the endpoint should stop the
* T2-shutdown timer and remove all knowledge of the
* association (and thus the association enters the CLOSED
* state).
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* Respond to a normal INIT chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, B
* B) "Z" shall respond immediately with an INIT ACK chunk. The
* destination IP address of the INIT ACK MUST be set to the source
* IP address of the INIT to which this INIT ACK is responding. In
* the response, besides filling in other parameters, "Z" must set the
* Verification Tag field to Tag_A, and also provide its own
* Verification Tag (Tag_Z) in the Initiate Tag field.
*
* Verification Tag: Must be 0.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1B_init(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* Normally, this would cause an ABORT with a Protocol Violation
* error, but since we don't have an association, we'll
* just discard the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the INIT is coming toward a closing socket, we'll send back
* an ABORT. Essentially, this catches the race of an INIT being
* backlogged to the socket at the same time as the user issues close().
* Since the socket and all its associations are going away, we
* can treat this as OOTB.
*/
if (sctp_sstate(ep->base.sk, CLOSING))
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
return SCTP_DISPOSITION_CONSUME;
} else {
return SCTP_DISPOSITION_NOMEM;
}
} else {
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
commands);
}
}
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)),
GFP_ATOMIC) < 0)
goto nomem_init;
/* The call, sctp_process_init(), can fail on memory allocation. */
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem_init;
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
/* If there are errors need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk)
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem_init;
/* If there are errors need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
sctp_chunk_free(err_chunk);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources, nor keep any states for the
* new association. Otherwise, "Z" will be vulnerable to resource
* attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_init:
sctp_association_free(new_asoc);
nomem:
if (err_chunk)
sctp_chunk_free(err_chunk);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal INIT ACK chunk.
* We are the side that is initiating the association.
*
* Section: 5.1 Normal Establishment of an Association, C
* C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init
* timer and leave COOKIE-WAIT state. "A" shall then send the State
* Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start
* the T1-cookie timer, and enter the COOKIE-ECHOED state.
*
* Note: The COOKIE ECHO chunk can be bundled with any pending outbound
* DATA chunks, but it MUST be the first chunk in the packet and
* until the COOKIE ACK is returned the sender MUST NOT send any
* other packets to the peer.
*
* Verification Tag: 3.3.3
* If the value of the Initiate Tag in a received INIT ACK chunk is
* found to be 0, the receiver MUST treat it as an error and close the
* association by transmitting an ABORT.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_init_chunk_t *initchunk;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*/
if (!chunk->singleton)
return sctp_sf_violation_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT-ACK chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
sctp_error_t error = SCTP_ERROR_NO_RESOURCE;
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes. If there are no causes,
* then there wasn't enough memory. Just terminate
* the association.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
sctp_chunk_free(err_chunk);
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
error = SCTP_ERROR_INV_PARAM;
}
}
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED,
asoc, chunk->transport);
}
/* Tag the variable length parameters. Note that we never
* convert the parameters in an INIT chunk.
*/
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr;
sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT,
SCTP_PEER_INIT(initchunk));
/* Reset init error count upon receipt of INIT-ACK. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* 5.1 C) "A" shall stop the T1-init timer and leave
* COOKIE-WAIT state. "A" shall then ... start the T1-cookie
* timer, and enter the COOKIE-ECHOED state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_ECHOED));
/* SCTP-AUTH: generate the association shared keys so that
* we can potentially sign the COOKIE-ECHO.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_SHKEY, SCTP_NULL());
/* 5.1 C) "A" shall then send the State Cookie received in the
* INIT ACK chunk in a COOKIE ECHO chunk, ...
*/
/* If there is any errors to report, send the ERROR chunk generated
* for unknown parameters as well.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO,
SCTP_CHUNK(err_chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
*
* Section: 5.1 Normal Establishment of an Association, D
* D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply
* with a COOKIE ACK chunk after building a TCB and moving to
* the ESTABLISHED state. A COOKIE ACK chunk may be bundled with
* any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK
* chunk MUST be the first chunk in the packet.
*
* IMPLEMENTATION NOTE: An implementation may choose to send the
* Communication Up notification to the SCTP user upon reception
* of a valid COOKIE ECHO chunk.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* D) Rules for packet carrying a COOKIE ECHO
*
* - When sending a COOKIE ECHO, the endpoint MUST use the value of the
* Initial Tag received in the INIT ACK.
*
* - The receiver of a COOKIE ECHO follows the procedures in Section 5.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
struct sctp_ulpevent *ev, *ai_ev = NULL;
int error = 0;
struct sctp_chunk *err_chk_p;
struct sock *sk;
/* If the packet is an OOTB packet which is temporarily on the
* control endpoint, respond with an ABORT.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep) {
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/* Make sure that the COOKIE_ECHO chunk has a valid length.
* In this case, we check that we have enough for at least a
* chunk header. More detailed verification is done
* in sctp_unpack_cookie().
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* If the endpoint is not listening or if the number of associations
* on the TCP-style socket exceed the max backlog, respond with an
* ABORT.
*/
sk = ep->base.sk;
if (!sctp_sstate(sk, LISTENING) ||
(sctp_style(sk, TCP) && sk_acceptq_is_full(sk)))
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr =
(struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
/* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint
* "Z" will reply with a COOKIE ACK chunk after building a TCB
* and moving to the ESTABLISHED state.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
/* Delay state machine commands until later.
*
* Re-build the bind address for the association is done in
* the sctp_unpack_cookie() already.
*/
/* This is a brand-new association, so these are not yet side
* effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk,
&chunk->subh.cookie_hdr->c.peer_addr,
peer_init, GFP_ATOMIC))
goto nomem_init;
/* SCTP-AUTH: Now that we've populated the required fields in
* sctp_process_init(), set up the association shared keys as
* necessary so that we can potentially authenticate the ACK
*/
error = sctp_auth_asoc_init_active_key(new_asoc, GFP_ATOMIC);
if (error)
goto nomem_init;
/* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
* is supposed to be authenticated and we have to do delayed
* authentication. We've just recreated the association using
* the information in the cookie and now it's much easier to
* do the authentication.
*/
if (chunk->auth_chunk) {
struct sctp_chunk auth;
sctp_ierror_t ret;
/* Make sure that we and the peer are AUTH capable */
if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
auth.sctp_hdr = chunk->sctp_hdr;
auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
sizeof(sctp_chunkhdr_t));
skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem_init;
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (new_asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(new_asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem_aiev;
}
/* Add all the state machine commands now since we've created
* everything. This way we don't introduce memory corruption
* during side-effect processing and correctly count established
* associations.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* This will send the COOKIE ACK */
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* Queue the ASSOC_CHANGE event */
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Send up the Adaptation Layer Indication event */
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem_aiev:
sctp_ulpevent_free(ev);
nomem_ev:
sctp_chunk_free(repl);
nomem_init:
sctp_association_free(new_asoc);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Respond to a normal COOKIE ACK chunk.
* We are the side that initiated the association.
*
* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move from the
* COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie
* timer. It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*
* Verification Tag:
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Verify that the chunk length for the COOKIE-ACK is OK.
* If we don't do this, any bundled chunks may be junked.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Reset init error count upon receipt of COOKIE-ACK,
* to avoid problems with the management of this
* counter in stale cookie situations when a transition back
* from the COOKIE-ECHOED state to the COOKIE-WAIT
* state is performed.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_RESET, SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* E) Upon reception of the COOKIE ACK, endpoint "A" will move
* from the COOKIE-ECHOED state to the ESTABLISHED state,
* stopping the T1-cookie timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* It may also notify its ULP about the successful
* establishment of the association with a Communication Up
* notification (see Section 10).
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP,
0, asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Sockets API Draft Section 5.3.1.6
* When a peer sends an Adaptation Layer Indication parameter, SCTP
* delivers this notification to inform the application of the
* peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ev = sctp_ulpevent_make_adaptation_indication(asoc, GFP_ATOMIC);
if (!ev)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Generate and sendout a heartbeat packet. */
static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
struct sctp_chunk *reply;
/* Send a heartbeat to our peer. */
reply = sctp_make_heartbeat(asoc, transport);
if (!reply)
return SCTP_DISPOSITION_NOMEM;
/* Set rto_pending indicating that an RTT measurement
* is started with this heartbeat chunk.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
}
/* Generate a HEARTBEAT packet on the given transport. */
sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = (struct sctp_transport *) arg;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
/* Section 3.3.5.
* The Sender-specific Heartbeat Info field should normally include
* information about the sender's current time when this HEARTBEAT
* chunk is sent and the destination transport address to which this
* HEARTBEAT is sent (see Section 8.3).
*/
if (transport->param_flags & SPP_HB_ENABLE) {
if (SCTP_DISPOSITION_NOMEM ==
sctp_sf_heartbeat(ep, asoc, type, arg,
commands))
return SCTP_DISPOSITION_NOMEM;
/* Set transport error counter and association error counter
* when sending heartbeat.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(transport));
}
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_IDLE,
SCTP_TRANSPORT(transport));
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE,
SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process a heartbeat request.
*
* Section: 8.3 Path Heartbeat
* The receiver of the HEARTBEAT should immediately respond with a
* HEARTBEAT ACK that contains the Heartbeat Information field copied
* from the received HEARTBEAT chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* When receiving an SCTP packet, the endpoint MUST ensure that the
* value in the Verification Tag field of the received SCTP packet
* matches its own Tag. If the received Verification Tag value does not
* match the receiver's own tag value, the receiver shall silently
* discard the packet and shall not process it any further except for
* those cases listed in Section 8.5.1 below.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_beat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_paramhdr_t *param_hdr;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
size_t paylen = 0;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* 8.3 The receiver of the HEARTBEAT should immediately
* respond with a HEARTBEAT ACK that contains the Heartbeat
* Information field copied from the received HEARTBEAT chunk.
*/
chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data;
param_hdr = (sctp_paramhdr_t *) chunk->subh.hb_hdr;
paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
if (ntohs(param_hdr->length) > paylen)
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
param_hdr, commands);
if (!pskb_pull(chunk->skb, paylen))
goto nomem;
reply = sctp_make_heartbeat_ack(asoc, chunk, param_hdr, paylen);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the returning HEARTBEAT ACK.
*
* Section: 8.3 Path Heartbeat
* Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT
* should clear the error counter of the destination transport
* address to which the HEARTBEAT was sent, and mark the destination
* transport address as active if it is not so marked. The endpoint may
* optionally report to the upper layer when an inactive destination
* address is marked as active due to the reception of the latest
* HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also
* clear the association overall error count as well (as defined
* in section 8.1).
*
* The receiver of the HEARTBEAT ACK should also perform an RTT
* measurement for that destination transport address using the time
* value carried in the HEARTBEAT ACK chunk.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_backbeat_8_3(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
union sctp_addr from_addr;
struct sctp_transport *link;
sctp_sender_hb_info_t *hbinfo;
unsigned long max_interval;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the HEARTBEAT-ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t) +
sizeof(sctp_sender_hb_info_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data;
/* Make sure that the length of the parameter is what we expect */
if (ntohs(hbinfo->param_hdr.length) !=
sizeof(sctp_sender_hb_info_t)) {
return SCTP_DISPOSITION_DISCARD;
}
from_addr = hbinfo->daddr;
link = sctp_assoc_lookup_paddr(asoc, &from_addr);
/* This should never happen, but let's log it if so. */
if (unlikely(!link)) {
if (from_addr.sa.sa_family == AF_INET6) {
net_warn_ratelimited("%s association %p could not find address %pI6\n",
__func__,
asoc,
&from_addr.v6.sin6_addr);
} else {
net_warn_ratelimited("%s association %p could not find address %pI4\n",
__func__,
asoc,
&from_addr.v4.sin_addr.s_addr);
}
return SCTP_DISPOSITION_DISCARD;
}
/* Validate the 64-bit random nonce. */
if (hbinfo->hb_nonce != link->hb_nonce)
return SCTP_DISPOSITION_DISCARD;
max_interval = link->hbinterval + link->rto;
/* Check if the timestamp looks valid. */
if (time_after(hbinfo->sent_at, jiffies) ||
time_after(jiffies, hbinfo->sent_at + max_interval)) {
pr_debug("%s: HEARTBEAT ACK with invalid timestamp received "
"for transport:%p\n", __func__, link);
return SCTP_DISPOSITION_DISCARD;
}
/* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of
* the HEARTBEAT should clear the error counter of the
* destination transport address to which the HEARTBEAT was
* sent and mark the destination transport address as active if
* it is not so marked.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link));
return SCTP_DISPOSITION_CONSUME;
}
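/* Worked example for the timestamp check above (illustrative): with
* link->hbinterval == 30 * HZ and link->rto == 3 * HZ, max_interval is
* 33 * HZ, so a HEARTBEAT ACK is accepted only if its echoed timestamp
* is neither in the future nor more than 33 seconds old; anything else
* is treated as stale or forged and discarded.
*/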
/* Helper function to send out an abort for the restart
* condition.
*/
static int sctp_sf_send_restart_abort(struct net *net, union sctp_addr *ssa,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
int len;
struct sctp_packet *pkt;
union sctp_addr_param *addrparm;
struct sctp_errhdr *errhdr;
struct sctp_endpoint *ep;
char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)];
struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family);
/* Build the error on the stack. We are way too malloc-crazy
* throughout the code today.
*/
errhdr = (struct sctp_errhdr *)buffer;
addrparm = (union sctp_addr_param *)errhdr->variable;
/* Copy into a parm format. */
len = af->to_addr_param(ssa, addrparm);
len += sizeof(sctp_errhdr_t);
errhdr->cause = SCTP_ERROR_RESTART;
errhdr->length = htons(len);
/* Assign to the control socket. */
ep = sctp_sk(net->sctp.ctl_sock)->ep;
/* Association is NULL since this may be a restart attack and we
* want to send back the attacker's vtag.
*/
pkt = sctp_abort_pkt_new(net, ep, NULL, init, errhdr, len);
if (!pkt)
goto out;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
/* Discard the rest of the inbound packet. */
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
out:
/* Even if there is no memory, treat as a failure so
* the packet will get dropped.
*/
return 0;
}
static bool list_has_sctp_addr(const struct list_head *list,
union sctp_addr *ipaddr)
{
struct sctp_transport *addr;
list_for_each_entry(addr, list, transports) {
if (sctp_cmp_addr_exact(ipaddr, &addr->ipaddr))
return true;
}
return false;
}
/* A restart is occurring, check to make sure no new addresses
* are being added as we may be under a takeover attack.
*/
static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc,
const struct sctp_association *asoc,
struct sctp_chunk *init,
sctp_cmd_seq_t *commands)
{
struct net *net = sock_net(new_asoc->base.sk);
struct sctp_transport *new_addr;
int ret = 1;
/* Implementor's Guide - Section 5.2.2
* ...
* Before responding the endpoint MUST check to see if the
* unexpected INIT adds new addresses to the association. If new
* addresses are added to the association, the endpoint MUST respond
* with an ABORT.
*/
/* Search through all current addresses and make sure
* we aren't adding any new ones.
*/
list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
transports) {
if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
&new_addr->ipaddr)) {
sctp_sf_send_restart_abort(net, &new_addr->ipaddr, init,
commands);
ret = 0;
break;
}
}
/* Return success if all addresses were found. */
return ret;
}
/* Populate the verification/tie tags based on overlapping INIT
* scenario.
*
* Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state.
*/
static void sctp_tietags_populate(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
switch (asoc->state) {
/* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */
case SCTP_STATE_COOKIE_WAIT:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = 0;
break;
case SCTP_STATE_COOKIE_ECHOED:
new_asoc->c.my_vtag = asoc->c.my_vtag;
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
/* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED,
* COOKIE-WAIT and SHUTDOWN-ACK-SENT
*/
default:
new_asoc->c.my_ttag = asoc->c.my_vtag;
new_asoc->c.peer_ttag = asoc->c.peer_vtag;
break;
}
/* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
*/
new_asoc->rwnd = asoc->rwnd;
new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams;
new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams;
new_asoc->c.initial_tsn = asoc->c.initial_tsn;
}
/*
* Compare vtag/tietag values to determine unexpected COOKIE-ECHO
* handling action.
*
* RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists.
*
* Returns value representing action to be taken. These action values
* correspond to Action/Description values in RFC 2960, Table 2.
*/
static char sctp_tietags_compare(struct sctp_association *new_asoc,
const struct sctp_association *asoc)
{
/* In this case, the peer may have restarted. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag != new_asoc->c.peer_vtag) &&
(asoc->c.my_vtag == new_asoc->c.my_ttag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_ttag))
return 'A';
/* Collision case B. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
((asoc->c.peer_vtag != new_asoc->c.peer_vtag) ||
(0 == asoc->c.peer_vtag))) {
return 'B';
}
/* Collision case D. */
if ((asoc->c.my_vtag == new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag))
return 'D';
/* Collision case C. */
if ((asoc->c.my_vtag != new_asoc->c.my_vtag) &&
(asoc->c.peer_vtag == new_asoc->c.peer_vtag) &&
(0 == new_asoc->c.my_ttag) &&
(0 == new_asoc->c.peer_ttag))
return 'C';
/* No match to any of the special cases; discard this packet. */
return 'E';
}
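/* The cases above paraphrase RFC 2960, Section 5.2.4, Table 2
* (M = tag matches, X = tag does not match, A = any value, 0 = zero):
*
*	Local Tag  Peer's Tag  Local Tie-Tag  Peer's Tie-Tag  Action
*	    X          X            M              M          (A) restart
*	    M        X or 0         A              A          (B) collision
*	    X          M            0              0          (C) old cookie
*	    M          M            A              A          (D) duplicate
*
* Anything else falls through to (E): silently discard the packet.
*/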
/* Common helper routine for both duplicate and simultaneous INIT
* chunk handling.
*/
static sctp_disposition_t sctp_sf_do_unexpected_init(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *repl;
struct sctp_association *new_asoc;
struct sctp_chunk *err_chunk;
struct sctp_packet *packet;
sctp_unrecognized_param_t *unk_param;
int len;
/* 6.10 Bundling
* An endpoint MUST NOT bundle INIT, INIT ACK or
* SHUTDOWN COMPLETE with any other chunks.
*
* IG Section 2.11.2
* Furthermore, we require that the receiver of an INIT chunk MUST
* enforce these rules by silently discarding an arriving packet
* with an INIT chunk that is bundled with other chunks.
*/
if (!chunk->singleton)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* 3.1 A packet containing an INIT chunk MUST have a zero Verification
* Tag.
*/
if (chunk->sctp_hdr->vtag != 0)
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
/* Make sure that the INIT chunk has a valid length.
* In this case, we generate a protocol violation since we have
* an association established.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Grab the INIT header. */
chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data;
/* Tag the variable length parameters. */
chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t));
/* Verify the INIT chunk before processing it. */
err_chunk = NULL;
if (!sctp_verify_init(net, ep, asoc, chunk->chunk_hdr->type,
(sctp_init_chunk_t *)chunk->chunk_hdr, chunk,
&err_chunk)) {
/* This chunk contains fatal error. It is to be discarded.
* Send an ABORT, with causes if there is any.
*/
if (err_chunk) {
packet = sctp_abort_pkt_new(net, ep, asoc, arg,
(__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t),
ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t));
if (packet) {
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
retval = SCTP_DISPOSITION_CONSUME;
} else {
retval = SCTP_DISPOSITION_NOMEM;
}
goto cleanup;
} else {
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg,
commands);
}
}
/*
* Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of
* outbound streams) into the INIT ACK and cookie.
* FIXME: We are copying parameters from the endpoint not the
* association.
*/
new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC);
if (!new_asoc)
goto nomem;
if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
goto nomem;
/* In the outbound INIT ACK the endpoint MUST copy its current
* Verification Tag and peer's Verification Tag into a reserved
* place (local tie-tag and peer tie-tag) within the state cookie.
*/
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk),
(sctp_init_chunk_t *)chunk->chunk_hdr,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Do not do this check for COOKIE-WAIT state,
* since there are no peer addresses to check against.
* Upon return an ABORT will have been sent if needed.
*/
if (!sctp_state(asoc, COOKIE_WAIT)) {
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk,
commands)) {
retval = SCTP_DISPOSITION_CONSUME;
goto nomem_retval;
}
}
sctp_tietags_populate(new_asoc, asoc);
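	/* The tie-tags carry the current association's tags inside the
	 * new state cookie, so that when the matching COOKIE ECHO comes
	 * back, sctp_tietags_compare() can classify the collision case.
	 */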
/* B) "Z" shall respond immediately with an INIT ACK chunk. */
	/* If errors need to be reported for unknown parameters,
* make sure to reserve enough room in the INIT ACK for them.
*/
len = 0;
if (err_chunk) {
len = ntohs(err_chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t);
}
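	/* len now counts only the error causes (the ERROR chunk header
	 * is excluded), which is exactly the extra room that
	 * sctp_make_init_ack() must reserve below.
	 */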
repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
if (!repl)
goto nomem;
	/* If errors need to be reported for unknown parameters,
* include them in the outgoing INIT ACK as "Unrecognized parameter"
* parameter.
*/
if (err_chunk) {
/* Get the "Unrecognized parameter" parameter(s) out of the
* ERROR chunk generated by sctp_verify_init(). Since the
* error cause code for "unknown parameter" and the
* "Unrecognized parameter" type is the same, we can
* construct the parameters in INIT ACK by copying the
* ERROR causes over.
*/
unk_param = (sctp_unrecognized_param_t *)
((__u8 *)(err_chunk->chunk_hdr) +
sizeof(sctp_chunkhdr_t));
/* Replace the cause code with the "Unrecognized parameter"
* parameter type.
*/
sctp_addto_chunk(repl, len, unk_param);
}
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/*
* Note: After sending out INIT ACK with the State Cookie parameter,
* "Z" MUST NOT allocate any resources for this new association.
* Otherwise, "Z" will be vulnerable to resource attacks.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
retval = SCTP_DISPOSITION_CONSUME;
return retval;
nomem:
retval = SCTP_DISPOSITION_NOMEM;
nomem_retval:
if (new_asoc)
sctp_association_free(new_asoc);
cleanup:
if (err_chunk)
sctp_chunk_free(err_chunk);
return retval;
}
/*
* Handle simultaneous INIT.
* This means we started an INIT and then we got an INIT request from
* our peer.
*
* Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B)
* This usually indicates an initialization collision, i.e., each
* endpoint is attempting, at about the same time, to establish an
* association with the other endpoint.
*
* Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an
* endpoint MUST respond with an INIT ACK using the same parameters it
* sent in its original INIT chunk (including its Verification Tag,
* unchanged). These original parameters are combined with those from the
* newly received INIT chunk. The endpoint shall also generate a State
* Cookie with the INIT ACK. The endpoint uses the parameters sent in its
* INIT to calculate the State Cookie.
*
* After that, the endpoint MUST NOT change its state, the T1-init
* timer shall be left running and the corresponding TCB MUST NOT be
* destroyed. The normal procedures for handling State Cookies when
* a TCB exists will resolve the duplicate INITs to a single association.
*
* For an endpoint that is in the COOKIE-ECHOED state it MUST populate
* its Tie-Tags with the Tag information of itself and its peer (see
* section 5.2.2 for a description of the Tie-Tags).
*
* Verification Tag: Not explicit, but an INIT can not have a valid
* verification tag, so we skip the check.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_1_siminit(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
	/* Call helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}
/*
* Handle duplicated INIT messages. These are usually delayed
 * retransmissions.
*
* Section: 5.2.2 Unexpected INIT in States Other than CLOSED,
* COOKIE-ECHOED and COOKIE-WAIT
*
* Unless otherwise stated, upon reception of an unexpected INIT for
* this association, the endpoint shall generate an INIT ACK with a
* State Cookie. In the outbound INIT ACK the endpoint MUST copy its
* current Verification Tag and peer's Verification Tag into a reserved
* place within the state cookie. We shall refer to these locations as
* the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet
* containing this INIT ACK MUST carry a Verification Tag value equal to
* the Initiation Tag found in the unexpected INIT. And the INIT ACK
* MUST contain a new Initiation Tag (randomly generated see Section
* 5.3.1). Other parameters for the endpoint SHOULD be copied from the
* existing parameters of the association (e.g. number of outbound
* streams) into the INIT ACK and cookie.
*
* After sending out the INIT ACK, the endpoint shall take no further
* actions, i.e., the existing association, including its current state,
* and the corresponding TCB MUST NOT be changed.
*
* Note: Only when a TCB exists and the association is not in a COOKIE-
* WAIT state are the Tie-Tags populated. For a normal association INIT
* (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be
* set to 0 (indicating that no previous TCB existed). The INIT ACK and
* State Cookie are populated as specified in section 5.2.1.
*
* Verification Tag: Not specified, but an INIT has no way of knowing
* what the verification tag could be, so we ignore it.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_2_dupinit(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
	/* Call helper to do the real work for both simultaneous and
* duplicate INIT chunk handling.
*/
return sctp_sf_do_unexpected_init(net, ep, asoc, type, arg, commands);
}
/*
* Unexpected INIT-ACK handler.
*
* Section 5.2.3
 * If an INIT ACK is received by an endpoint in any state other than the
* COOKIE-WAIT state, the endpoint should discard the INIT ACK chunk.
* An unexpected INIT ACK usually indicates the processing of an old or
* duplicated INIT chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_3_initack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* Per the above section, we'll discard the chunk if we have an
* endpoint. If this is an OOTB INIT-ACK, treat it as such.
*/
if (ep == sctp_sk(net->sctp.ctl_sock)->ep)
return sctp_sf_ootb(net, ep, asoc, type, arg, commands);
else
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
}
/* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A')
*
* Section 5.2.4
* A) In this case, the peer may have restarted.
*/
static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_ulpevent *ev;
struct sctp_chunk *repl;
struct sctp_chunk *err;
sctp_disposition_t disposition;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
return SCTP_DISPOSITION_CONSUME;
}
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
* association but instead resend the SHUTDOWN ACK and send an ERROR
* chunk with a "Cookie Received while Shutting Down" error cause to
* its peer.
*/
if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) {
disposition = sctp_sf_do_9_2_reshutack(net, ep, asoc,
SCTP_ST_CHUNK(chunk->chunk_hdr->type),
chunk, commands);
if (SCTP_DISPOSITION_NOMEM == disposition)
goto nomem;
err = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_COOKIE_IN_SHUTDOWN,
NULL, 0, 0);
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_DISPOSITION_CONSUME;
}
/* For now, stop pending T3-rtx and SACK timers, fail any unsent/unacked
	 * data. Consider the optional choice of resending this data.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_SACK));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL());
/* Stop pending T4-rto timer, teardown ASCONF queue, ASCONF-ACK queue
* and ASCONF-ACK cache.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_ASCONF_QUEUE, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
/* Report association restart to upper layer. */
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0,
new_asoc->c.sinit_num_ostreams,
new_asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem_ev;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
if (sctp_state(asoc, SHUTDOWN_PENDING) &&
(sctp_sstate(asoc->base.sk, CLOSING) ||
sock_flag(asoc->base.sk, SOCK_DEAD))) {
		/* if we're currently in SHUTDOWN_PENDING, but the socket
* has been closed by user, don't transition to ESTABLISHED.
* Instead trigger SHUTDOWN bundled with COOKIE_ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
SCTP_ST_CHUNK(0), NULL,
commands);
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
}
return SCTP_DISPOSITION_CONSUME;
nomem_ev:
sctp_chunk_free(repl);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B')
*
* Section 5.2.4
* B) In this case, both sides may be attempting to start an association
* at about the same time but the peer endpoint started its INIT
* after responding to the local endpoint's INIT
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
sctp_init_chunk_t *peer_init;
struct sctp_chunk *repl;
/* new_asoc is a brand-new association, so these are not yet
* side effects--it is safe to run them here.
*/
peer_init = &chunk->subh.cookie_hdr->c.peer_init[0];
if (!sctp_process_init(new_asoc, chunk, sctp_source(chunk), peer_init,
GFP_ATOMIC))
goto nomem;
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose to
* send the Communication Up notification to the SCTP user
* upon reception of a valid COOKIE ECHO chunk.
*
* Sadly, this needs to be implemented as a side-effect, because
* we are not guaranteed to have set the association id of the real
* association and so these notifications need to be delayed until
* the association id is allocated.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_CHANGE, SCTP_U8(SCTP_COMM_UP));
/* Sockets API Draft Section 5.3.1.6
	 * When a peer sends an Adaptation Layer Indication parameter, SCTP
	 * delivers this notification to inform the application of the
	 * peer's requested adaptation layer.
*
* This also needs to be done as a side effect for the same reason as
* above.
*/
if (asoc->peer.adaptation_ind)
sctp_add_cmd_sf(commands, SCTP_CMD_ADAPTATION_IND, SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C')
*
* Section 5.2.4
* C) In this case, the local endpoint's cookie has arrived late.
* Before it arrived, the local endpoint sent an INIT and received an
* INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag
* but a new tag of its own.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_c(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
/* The cookie should be silently discarded.
* The endpoint SHOULD NOT change states and should leave
* any timers running.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D')
*
* Section 5.2.4
*
* D) When both local and remote tags match the endpoint should always
* enter the ESTABLISHED state, if it has not already done so.
*/
/* This case represents an initialization collision. */
static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_association *new_asoc)
{
struct sctp_ulpevent *ev = NULL, *ai_ev = NULL;
struct sctp_chunk *repl;
/* Clarification from Implementor's Guide:
* D) When both local and remote tags match the endpoint should
* enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state.
* It should stop any cookie timer that may be running and send
* a COOKIE ACK.
*/
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_ESTABLISHED));
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START,
SCTP_NULL());
/* RFC 2960 5.1 Normal Establishment of an Association
*
* D) IMPLEMENTATION NOTE: An implementation may choose
* to send the Communication Up notification to the
* SCTP user upon reception of a valid COOKIE
* ECHO chunk.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0,
SCTP_COMM_UP, 0,
asoc->c.sinit_num_ostreams,
asoc->c.sinit_max_instreams,
NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* Sockets API Draft Section 5.3.1.6
		 * When a peer sends an Adaptation Layer Indication parameter,
		 * SCTP delivers this notification to inform the application
		 * of the peer's requested adaptation layer.
*/
if (asoc->peer.adaptation_ind) {
ai_ev = sctp_ulpevent_make_adaptation_indication(asoc,
GFP_ATOMIC);
if (!ai_ev)
goto nomem;
}
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
if (!repl)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
if (ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
if (ai_ev)
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ai_ev));
return SCTP_DISPOSITION_CONSUME;
nomem:
if (ai_ev)
sctp_ulpevent_free(ai_ev);
if (ev)
sctp_ulpevent_free(ev);
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying
* chunk was retransmitted and then delayed in the network.
*
* Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists
*
* Verification Tag: None. Do cookie validation.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_disposition_t retval;
struct sctp_chunk *chunk = arg;
struct sctp_association *new_asoc;
int error = 0;
char action;
struct sctp_chunk *err_chk_p;
/* Make sure that the chunk has a valid length from the protocol
* perspective. In this case check to make sure we have at least
* enough for the chunk header. Cookie length verification is
* done later.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* "Decode" the chunk. We have no optional parameters so we
* are in good shape.
*/
chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data;
if (!pskb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) -
sizeof(sctp_chunkhdr_t)))
goto nomem;
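	/* The cookie body (everything past the chunk header) is now
	 * linear in the skb; sctp_unpack_cookie() below validates its
	 * signature and lifetime before rebuilding an association from it.
	 */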
/* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie
* of a duplicate COOKIE ECHO match the Verification Tags of the
* current association, consider the State Cookie valid even if
* the lifespan is exceeded.
*/
new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error,
&err_chk_p);
/* FIXME:
* If the re-build failed, what is the proper error path
* from here?
*
* [We should abort the association. --piggy]
*/
if (!new_asoc) {
/* FIXME: Several errors are possible. A bad cookie should
* be silently discarded, but think about logging it too.
*/
switch (error) {
case -SCTP_IERROR_NOMEM:
goto nomem;
case -SCTP_IERROR_STALE_COOKIE:
sctp_send_stale_cookie_err(net, ep, asoc, chunk, commands,
err_chk_p);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case -SCTP_IERROR_BAD_SIG:
default:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
}
	/* Compare the tie-tags in the cookie with the verification tags
	 * of the current association.
*/
action = sctp_tietags_compare(new_asoc, asoc);
switch (action) {
case 'A': /* Association restart. */
retval = sctp_sf_do_dupcook_a(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'B': /* Collision case B. */
retval = sctp_sf_do_dupcook_b(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'C': /* Collision case C. */
retval = sctp_sf_do_dupcook_c(net, ep, asoc, chunk, commands,
new_asoc);
break;
case 'D': /* Collision case D. */
retval = sctp_sf_do_dupcook_d(net, ep, asoc, chunk, commands,
new_asoc);
break;
default: /* Discard packet for all others. */
retval = sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
break;
}
	/* Delete the temporary new association. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
	/* Restore association pointer to provide SCTP command interpreter
	 * with a valid context in case it needs to manipulate
	 * the queues. */
sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC,
SCTP_ASOC((struct sctp_association *)asoc));
return retval;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process an ABORT. (SHUTDOWN-PENDING state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_pending_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't discard just the
	 * chunk, as we do not know its true length. So, to be safe,
	 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_sent_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't discard just the
	 * chunk, as we do not know its true length. So, to be safe,
	 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process an ABORT. (SHUTDOWN-ACK-SENT state)
*
* See sctp_sf_do_9_1_abort().
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* The same T2 timer, so we should be able to use
* common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_abort(net, ep, asoc, type, arg, commands);
}
/*
* Handle an Error received in COOKIE_ECHOED state.
*
* Only handle the error type of stale COOKIE Error, the other errors will
* be ignored.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_cookie_echoed_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length.
* The parameter walking depends on this as well.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Process the error here */
/* FUTURE FIXME: When PR-SCTP related and other optional
* parms are emitted, this will have to change to handle multiple
* errors.
*/
sctp_walk_errors(err, chunk->chunk_hdr) {
if (SCTP_ERROR_STALE_COOKIE == err->cause)
return sctp_sf_do_5_2_6_stale(net, ep, asoc, type,
arg, commands);
}
/* It is possible to have malformed error causes, and that
* will cause us to end the walk early. However, since
* we are discarding the packet, there should be no adverse
	 * effects.
*/
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/*
* Handle a Stale COOKIE Error
*
* Section: 5.2.6 Handle Stale COOKIE Error
* If the association is in the COOKIE-ECHOED state, the endpoint may elect
* one of the following three alternatives.
* ...
* 3) Send a new INIT chunk to the endpoint, adding a Cookie
* Preservative parameter requesting an extension to the lifetime of
* the State Cookie. When calculating the time extension, an
* implementation SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no more
* than 1 second beyond the measured RTT, due to long State Cookie
* lifetimes making the endpoint more subject to a replay attack.
*
* Verification Tag: Not explicit, but safe to ignore.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_do_5_2_6_stale(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
time_t stale;
sctp_cookie_preserve_param_t bht;
sctp_errhdr_t *err;
struct sctp_chunk *reply;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
if (attempts > asoc->max_init_attempts) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_STALE_COOKIE));
return SCTP_DISPOSITION_DELETE_TCB;
}
err = (sctp_errhdr_t *)(chunk->skb->data);
/* When calculating the time extension, an implementation
* SHOULD use the RTT information measured based on the
* previous COOKIE ECHO / ERROR exchange, and should add no
* more than 1 second beyond the measured RTT, due to long
* State Cookie lifetimes making the endpoint more subject to
* a replay attack.
	 * The Measure of Staleness is in usec (1/1000000 sec); the
	 * Suggested Cookie Life-span Increment is in msec (1/1000 sec).
* In general, if you use the suggested cookie life, the value
* found in the field of measure of staleness should be doubled
* to give ample time to retransmit the new cookie and thus
* yield a higher probability of success on the reattempt.
*/
stale = ntohl(*(__be32 *)((u8 *)err + sizeof(sctp_errhdr_t)));
stale = (stale * 2) / 1000;
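	/* For example, a reported staleness of 500000 usec (0.5 sec)
	 * yields (500000 * 2) / 1000 = 1000 msec as the requested
	 * lifespan increment.
	 */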
bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE;
bht.param_hdr.length = htons(sizeof(bht));
bht.lifespan_increment = htonl(stale);
/* Build that new INIT chunk. */
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht));
if (!reply)
goto nomem;
sctp_addto_chunk(reply, sizeof(bht), &bht);
/* Clear peer's init_tag cached in assoc as we are sending a new INIT */
sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL());
/* Stop pending T3-rtx and heartbeat timers */
sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
/* Delete non-primary peer ip addresses since we are transitioning
* back to the COOKIE-WAIT state
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL());
/* If we've sent any data bundled with COOKIE-ECHO we will need to
* resend
*/
sctp_add_cmd_sf(commands, SCTP_CMD_T1_RETRAN,
SCTP_TRANSPORT(asoc->peer.primary_path));
/* Cast away the const modifier, as we want to just
	 * rerun it through as a side effect.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_COUNTER_INC, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process an ABORT.
*
* Section: 9.1
* After checking the Verification Tag, the receiving endpoint shall
* remove the association from its record, and shall report the
* termination to its upper layer.
*
* Verification Tag: 8.5.1 Exceptions in Verification Tag Rules
* B) Rules for packet carrying ABORT:
*
* - The endpoint shall always fill in the Verification Tag field of the
* outbound packet with the destination endpoint's tag value if it
* is known.
*
* - If the ABORT is sent in response to an OOTB packet, the endpoint
* MUST follow the procedure described in Section 8.4.
*
* - The receiver MUST accept the packet if the Verification Tag
* matches either its own tag, OR the tag of its peer. Otherwise, the
* receiver MUST silently discard the packet and take no further
* action.
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't discard just the
	 * chunk, as we do not know its true length. So, to be safe,
	 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* ADD-IP: Special case for ABORT chunks
* F4) One special consideration is that ABORT Chunks arriving
* destined to the IP address being deleted MUST be
* ignored (see Section 5.3.1 for further details).
*/
if (SCTP_ADDR_DEL ==
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
sctp_errhdr_t *err;
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
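		/* All causes walked cleanly, so the first cause code is
		 * safe to report via the ASSOC_FAILED side effect below.
		 */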
}
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_PERR(error));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
/*
* Process an ABORT. (COOKIE-WAIT state)
*
* See sctp_sf_do_9_1_abort() above.
*/
sctp_disposition_t sctp_sf_cookie_wait_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
unsigned int len;
__be16 error = SCTP_ERROR_NO_ERROR;
if (!sctp_vtag_verify_either(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ABORT chunk has a valid length.
* Since this is an ABORT chunk, we have to discard it
* because of the following text:
* RFC 2960, Section 3.3.7
* If an endpoint receives an ABORT with a format error or for an
* association that doesn't exist, it MUST silently discard it.
	 * Because the length is "invalid", we can't discard just the
	 * chunk, as we do not know its true length. So, to be safe,
	 * discard the whole packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((sctp_errhdr_t *)chunk->skb->data)->cause;
return sctp_stop_t1_and_abort(net, commands, error, ECONNREFUSED, asoc,
chunk->transport);
}
/*
* Process an incoming ICMP as an ABORT. (COOKIE-WAIT state)
*/
sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return sctp_stop_t1_and_abort(net, commands, SCTP_ERROR_NO_ERROR,
ENOPROTOOPT, asoc,
(struct sctp_transport *)arg);
}
/*
* Process an ABORT. (COOKIE-ECHOED state)
*/
sctp_disposition_t sctp_sf_cookie_echoed_abort(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_abort(net, ep, asoc, type, arg, commands);
}
/*
* Stop T1 timer and abort association with "INIT failed".
*
* This is common code called by several sctp_sf_*_abort() functions above.
*/
static sctp_disposition_t sctp_stop_t1_and_abort(struct net *net,
sctp_cmd_seq_t *commands,
__be16 error, int sk_err,
const struct sctp_association *asoc,
struct sctp_transport *transport)
{
pr_debug("%s: ABORT received (INIT)\n", __func__);
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err));
/* CMD_INIT_FAILED will DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(error));
return SCTP_DISPOSITION_ABORT;
}
/*
* sctp_sf_do_9_2_shut
*
* Section: 9.2
* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
*
* - stop accepting new data from its SCTP user
*
* - verify, by checking the Cumulative TSN Ack field of the chunk,
* that all its outstanding DATA chunks have been received by the
* SHUTDOWN sender.
*
 * Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT
 * send a SHUTDOWN in response to a ULP request, and should discard
* subsequent SHUTDOWN chunks.
*
* If there are still outstanding DATA chunks left, the SHUTDOWN
* receiver shall continue to follow normal data transmission
* procedures defined in Section 6 until all outstanding DATA chunks
* are acknowledged; however, the SHUTDOWN receiver MUST NOT accept
* new data from its SCTP user.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
sctp_disposition_t disposition;
struct sctp_ulpevent *ev;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Convert the elaborate header. */
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t));
chunk->subh.shutdown_hdr = sdh;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
	/* If the Cumulative TSN Ack is beyond the max TSN currently
	 * sent, terminate the association and respond to the
	 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
/* API 5.3.1.5 SCTP_SHUTDOWN_EVENT
* When a peer sends a SHUTDOWN, SCTP delivers this notification to
* inform the application that it should cease sending data.
*/
ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC);
if (!ev) {
disposition = SCTP_DISPOSITION_NOMEM;
goto out;
}
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the reception of the SHUTDOWN, the peer endpoint shall
* - enter the SHUTDOWN-RECEIVED state,
* - stop accepting new data from its SCTP user
*
* [This is implicit in the new state.]
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_shutdown_ack(net, ep, asoc, type,
arg, commands);
}
if (SCTP_DISPOSITION_NOMEM == disposition)
goto out;
/* - verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(chunk->subh.shutdown_hdr->cum_tsn_ack));
out:
return disposition;
}
/*
* sctp_sf_do_9_2_shut_ctsn
*
* Once an endpoint has reached the SHUTDOWN-RECEIVED state,
* it MUST NOT send a SHUTDOWN in response to a ULP request.
* The Cumulative TSN Ack of the received SHUTDOWN chunk
* MUST be processed.
*/
sctp_disposition_t sctp_sf_do_9_2_shut_ctsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_shutdownhdr_t *sdh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk,
sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
sdh = (sctp_shutdownhdr_t *)chunk->skb->data;
ctsn = ntohl(sdh->cum_tsn_ack);
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
	/* If the Cumulative TSN Ack is beyond the max TSN currently
	 * sent, terminate the association and respond to the
	 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
/* verify, by checking the Cumulative TSN Ack field of the
* chunk, that all its outstanding DATA chunks have been
* received by the SHUTDOWN sender.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN,
SCTP_BE32(sdh->cum_tsn_ack));
return SCTP_DISPOSITION_CONSUME;
}
/* RFC 2960 9.2
* If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk
* (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination
* transport addresses (either in the IP addresses or in the INIT chunk)
* that belong to this association, it should discard the INIT chunk and
* retransmit the SHUTDOWN ACK chunk.
*/
sctp_disposition_t sctp_sf_do_9_2_reshutack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* Make sure that the chunk has a valid length */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Since we are not going to really process this INIT, there
	 * is no point in verifying chunk boundaries. Just generate
* the SHUTDOWN ACK.
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (NULL == reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-SHUTDOWN timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* sctp_sf_do_ecn_cwr
*
* Section: Appendix A: Explicit Congestion Notification
*
* CWR:
*
* RFC 2481 details a specific bit for a sender to send in the header of
* its next outbound TCP segment to indicate to its peer that it has
* reduced its congestion window. This is termed the CWR bit. For
* SCTP the same indication is made by including the CWR chunk.
* This chunk contains one data element, i.e. the TSN number that
* was sent in the ECNE chunk. This element represents the lowest
* TSN number in the datagram that was originally marked with the
* CE bit.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecn_cwr(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_cwrhdr_t *cwr;
struct sctp_chunk *chunk = arg;
u32 lowest_tsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
cwr = (sctp_cwrhdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t));
lowest_tsn = ntohl(cwr->lowest_tsn);
/* Does this CWR ack the last sent congestion notification? */
if (TSN_lte(asoc->last_ecne_tsn, lowest_tsn)) {
/* Stop sending ECNE. */
sctp_add_cmd_sf(commands,
SCTP_CMD_ECN_CWR,
SCTP_U32(lowest_tsn));
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_do_ecne
*
* Section: Appendix A: Explicit Congestion Notification
*
* ECN-Echo
*
* RFC 2481 details a specific bit for a receiver to send back in its
* TCP acknowledgements to notify the sender of the Congestion
* Experienced (CE) bit having arrived from the network. For SCTP this
* same indication is made by including the ECNE chunk. This chunk
* contains one data element, i.e. the lowest TSN associated with the IP
* datagram marked with the CE bit.....
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_ecne(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_ecnehdr_t *ecne;
struct sctp_chunk *chunk = arg;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
ecne = (sctp_ecnehdr_t *) chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t));
/* If this is a newer ECNE than the last CWR packet we sent out */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE,
SCTP_U32(ntohl(ecne->lowest_tsn)));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of each valid
* DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated within
* 200 ms of the arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to be more
* conservative than the algorithms detailed in this document allow.
* However, an SCTP transmitter MUST NOT be more aggressive than the
* following algorithms allow.
*
* A SCTP receiver MUST NOT generate more than one SACK for every
* incoming packet, other than to update the offered window as the
* receiving application consumes new data.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_arg_t force = SCTP_NOFORCE();
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
break;
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_BAD_STREAM:
SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_noforce;
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
SCTP_INC_STATS(net, SCTP_MIB_IN_DATA_CHUNK_DISCARDS);
goto discard_force;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
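	/* If the sender set the I bit (SACK-IMMEDIATELY), acknowledge
	 * this packet with a SACK without delay.
	 */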
if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
force = SCTP_FORCE();
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* If this is the last chunk in a packet, we need to count it
* toward sack generation. Note that we need to SACK every
* OTHER packet containing data chunks, EVEN IF WE DISCARD
	 * THEM. We elect to NOT generate SACKs if the chunk fails
* the verification tag test.
*
* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* The SCTP endpoint MUST always acknowledge the reception of
* each valid DATA chunk.
*
* The guidelines on delayed acknowledgement algorithm
* specified in Section 4.2 of [RFC2581] SHOULD be followed.
* Specifically, an acknowledgement SHOULD be generated for at
* least every second packet (not every second DATA chunk)
* received, and SHOULD be generated within 200 ms of the
* arrival of any unacknowledged DATA chunk. In some
* situations it may be beneficial for an SCTP transmitter to
* be more conservative than the algorithms detailed in this
* document allow. However, an SCTP transmitter MUST NOT be
* more aggressive than the following algorithms allow.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_CONSUME;
discard_force:
/* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks
*
* When a packet arrives with duplicate DATA chunk(s) and with
* no new DATA chunk(s), the endpoint MUST immediately send a
* SACK with no delay. If a packet arrives with duplicate
* DATA chunk(s) bundled with new DATA chunks, the endpoint
* MAY immediately send a SACK. Normally receipt of duplicate
* DATA chunks will occur when the original SACK chunk was lost
* and the peer's RTO has expired. The duplicate TSN number(s)
* SHOULD be reported in the SACK as duplicate.
*/
	/* In our case, we split the MAY SACK advice up based on whether
	 * or not the last chunk is a duplicate.
*/
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_DISCARD;
discard_noforce:
if (chunk->end_of_packet)
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, force);
return SCTP_DISPOSITION_DISCARD;
consume:
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_sf_eat_data_fast_4_4
*
* Section: 4 (4)
* (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received
* DATA chunks without delay.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_data_fast_4_4(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
int error;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
error = sctp_eat_data(asoc, chunk, commands);
switch (error) {
case SCTP_IERROR_NO_ERROR:
case SCTP_IERROR_HIGH_TSN:
case SCTP_IERROR_DUP_TSN:
case SCTP_IERROR_IGNORE_TSN:
case SCTP_IERROR_BAD_STREAM:
break;
case SCTP_IERROR_NO_DATA:
goto consume;
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_abort_violation(net, ep, asoc, chunk, commands,
(u8 *)chunk->subh.data_hdr, sizeof(sctp_datahdr_t));
default:
BUG();
}
	/* Go ahead and force a SACK, since we are shutting down. */
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
if (chunk->end_of_packet) {
/* We must delay the chunk creation since the cumulative
* TSN has not been updated yet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
}
consume:
return SCTP_DISPOSITION_CONSUME;
}
/*
* Section: 6.2 Processing a Received SACK
* D) Any time a SACK arrives, the endpoint performs the following:
*
* i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point,
* then drop the SACK. Since Cumulative TSN Ack is monotonically
* increasing, a SACK whose Cumulative TSN Ack is less than the
* Cumulative TSN Ack Point indicates an out-of-order SACK.
*
* ii) Set rwnd equal to the newly received a_rwnd minus the number
* of bytes still outstanding after processing the Cumulative TSN Ack
* and the Gap Ack Blocks.
*
* iii) If the SACK is missing a TSN that was previously
* acknowledged via a Gap Ack Block (e.g., the data receiver
* reneged on the data), then mark the corresponding DATA chunk
* as available for retransmit: Mark it as missing for fast
* retransmit as described in Section 7.2.4 and if no retransmit
* timer is running for the destination address to which the DATA
* chunk was originally transmitted, then T3-rtx is started for
* that destination address.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_sack_6_2(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_sackhdr_t *sackh;
__u32 ctsn;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Pull the SACK chunk from the data buffer */
sackh = sctp_sm_pull_sack(chunk);
/* Was this a bogus SACK? */
if (!sackh)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
chunk->subh.sack_hdr = sackh;
ctsn = ntohl(sackh->cum_tsn_ack);
/* i) If Cumulative TSN Ack is less than the Cumulative TSN
* Ack Point, then drop the SACK. Since Cumulative TSN
* Ack is monotonically increasing, a SACK whose
* Cumulative TSN Ack is less than the Cumulative TSN Ack
* Point indicates an out-of-order SACK.
*/
if (TSN_lt(ctsn, asoc->ctsn_ack_point)) {
pr_debug("%s: ctsn:%x, ctsn_ack_point:%x\n", __func__, ctsn,
asoc->ctsn_ack_point);
return SCTP_DISPOSITION_DISCARD;
}
	/* If the Cumulative TSN Ack is beyond the max TSN currently
	 * sent, terminate the association and respond to the
	 * sender with an ABORT.
*/
if (!TSN_lt(ctsn, asoc->next_tsn))
return sctp_sf_violation_ctsn(net, ep, asoc, type, arg, commands);
/* Return this SACK for further processing. */
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_CHUNK(chunk));
/* Note: We do the rest of the work on the PROCESS_SACK
	 * side effect.
*/
return SCTP_DISPOSITION_CONSUME;
}
/*
* Generate an ABORT in response to a packet.
*
* Section: 8.4 Handle "Out of the blue" Packets, sctpimpguide 2.41
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*
* Verification Tag:
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_tabort_8_4_8(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make an ABORT. The T bit will be set if the asoc
* is NULL.
*/
abort = sctp_make_abort(asoc, chunk, 0);
if (!abort) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
		/* Attach the skb to the owning sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
}
return SCTP_DISPOSITION_NOMEM;
}
/*
* Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR
* event as ULP notification for each cause included in the chunk.
*
* API 5.3.1.3 - SCTP_REMOTE_ERROR
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_operr_notify(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_errhdr_t *err;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the ERROR chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
sctp_walk_errors(err, chunk->chunk_hdr);
if ((void *)err != (void *)chunk->chunk_end)
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err, commands);
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR,
SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an inbound SHUTDOWN ACK.
*
* From Section 9.2:
* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its
* peer, and remove all record of the association.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_final(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *reply;
struct sctp_ulpevent *ev;
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* 10.2 H) SHUTDOWN COMPLETE notification
*
* When SCTP completes the shutdown procedures (section 9.2) this
* notification is passed to the upper layer.
*/
ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP,
0, 0, 0, NULL, GFP_ATOMIC);
if (!ev)
goto nomem;
/* ...send a SHUTDOWN COMPLETE chunk to its peer, */
reply = sctp_make_shutdown_complete(asoc, chunk);
if (!reply)
goto nomem_chunk;
/* Do all the commands now (after allocation), so that we
* have consistent state if memory allocation fails
*/
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev));
/* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall
* stop the T2-shutdown timer,
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
/* ...and remove all record of the association. */
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
nomem_chunk:
sctp_ulpevent_free(ev);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* RFC 2960, 8.4 - Handle "Out of the blue" Packets, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* 8) The receiver should respond to the sender of the OOTB packet with
* an ABORT. When sending the ABORT, the receiver of the OOTB packet
* MUST fill in the Verification Tag field of the outbound packet
* with the value found in the Verification Tag field of the OOTB
* packet and set the T-bit in the Chunk Flags to indicate that the
* Verification Tag is reflected. After sending this ABORT, the
* receiver of the OOTB packet shall discard the OOTB packet and take
* no further action.
*/
sctp_disposition_t sctp_sf_ootb(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sk_buff *skb = chunk->skb;
sctp_chunkhdr_t *ch;
sctp_errhdr_t *err;
__u8 *ch_end;
int ootb_shut_ack = 0;
int ootb_cookie_ack = 0;
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
ch = (sctp_chunkhdr_t *) chunk->chunk_hdr;
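/* Scan every chunk header in the packet before deciding how to
* respond; the choice between SHUTDOWN COMPLETE, silent discard and
* ABORT depends on all of the bundled chunks, not just the first.
*/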
do {
/* Report violation if the chunk is less than the minimal length */
if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Now that we know we at least have a chunk header,
* do things that are type appropriate.
*/
if (SCTP_CID_SHUTDOWN_ACK == ch->type)
ootb_shut_ack = 1;
/* RFC 2960, Section 3.3.7
* Moreover, under any circumstances, an endpoint that
* receives an ABORT MUST NOT respond to that ABORT by
* sending an ABORT of its own.
*/
if (SCTP_CID_ABORT == ch->type)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR
* or a COOKIE ACK the SCTP Packet should be silently
* discarded.
*/
if (SCTP_CID_COOKIE_ACK == ch->type)
ootb_cookie_ack = 1;
if (SCTP_CID_ERROR == ch->type) {
sctp_walk_errors(err, ch) {
if (SCTP_ERROR_STALE_COOKIE == err->cause) {
ootb_cookie_ack = 1;
break;
}
}
}
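/* Chunk lengths are padded to a 4-byte boundary on the wire, so step
* to the next header using WORD_ROUND of the declared length.
*/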
/* Report violation if chunk len overflows */
ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
if (ch_end > skb_tail_pointer(skb))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
ch = (sctp_chunkhdr_t *) ch_end;
} while (ch_end < skb_tail_pointer(skb));
if (ootb_shut_ack)
return sctp_sf_shut_8_4_5(net, ep, asoc, type, arg, commands);
else if (ootb_cookie_ack)
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
else
return sctp_sf_tabort_8_4_8(net, ep, asoc, type, arg, commands);
}
/*
* Handle an "Out of the blue" SHUTDOWN ACK.
*
* Section: 8.4 5, sctpimpguide 2.41.
*
* 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should
* respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE.
* When sending the SHUTDOWN COMPLETE, the receiver of the OOTB
* packet must fill in the Verification Tag field of the outbound
* packet with the Verification Tag received in the SHUTDOWN ACK and
* set the T-bit in the Chunk Flags to indicate that the Verification
* Tag is reflected.
*
* Inputs
* (endpoint, asoc, type, arg, commands)
*
* Outputs
* (sctp_disposition_t)
*
* The return value is the disposition of the chunk.
*/
static sctp_disposition_t sctp_sf_shut_8_4_5(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *shut;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make a SHUTDOWN_COMPLETE.
* The T bit will be set if the asoc is NULL.
*/
shut = sctp_make_shutdown_complete(asoc, chunk);
if (!shut) {
sctp_ootb_pkt_free(packet);
return SCTP_DISPOSITION_NOMEM;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(shut))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Set the skb to the belonging sock for accounting. */
shut->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, shut);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
/* If the chunk length is invalid, we don't want to process
* the rest of the packet.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* We need to discard the rest of the packet to prevent
* potential bombing attacks from additional bundled chunks.
* This is documented in SCTP Threats ID.
*/
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state.
*
* Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK
* If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the
* procedures in section 8.4 SHOULD be followed, in other words it
* should be treated as an Out Of The Blue packet.
* [This means that we do NOT check the Verification Tag on these
* chunks. --piggy ]
*
*/
sctp_disposition_t sctp_sf_do_8_5_1_E_sa(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the SHUTDOWN_ACK chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
/* Although we do have an association in this case, it corresponds
* to a restarted association. So the packet is treated as an OOTB
* packet and the state function that handles OOTB SHUTDOWN_ACK is
* called with a NULL association.
*/
SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
return sctp_sf_shut_8_4_5(net, ep, NULL, type, arg, commands);
}
/* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */
sctp_disposition_t sctp_sf_do_asconf(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_chunk *asconf_ack = NULL;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *hdr;
__u32 serial;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* ADD-IP: Section 4.1.1
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!net->sctp.addip_noauth && !chunk->auth)
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the ASCONF ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
hdr = (sctp_addiphdr_t *)chunk->skb->data;
serial = ntohl(hdr->serial);
/* Verify the ASCONF chunk before processing it. */
if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
/* ADDIP 5.2 E1) Compare the value of the serial number to the value
* the endpoint stored in a new association variable
* 'Peer-Serial-Number'.
*/
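/* A hypothetical walk-through, assuming peer.addip_serial == 5: a
* received serial of 6 is the expected next one and is processed
* (E4); 5 or below is answered from the ASCONF-ACK cache (E2);
* anything further ahead is discarded as stale or forged (E5).
*/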
if (serial == asoc->peer.addip_serial + 1) {
/* If this is the first instance of ASCONF in the packet,
* we can clean our old ASCONF-ACKs.
*/
if (!chunk->has_asconf)
sctp_assoc_clean_asconf_ack_cache(asoc);
/* ADDIP 5.2 E4) When the Sequence Number matches the next one
* expected, process the ASCONF as described below and after
* processing the ASCONF Chunk, append an ASCONF-ACK Chunk to
* the response packet and cache a copy of it (in the event it
* later needs to be retransmitted).
*
* Essentially, do V1-V5.
*/
asconf_ack = sctp_process_asconf((struct sctp_association *)
asoc, chunk);
if (!asconf_ack)
return SCTP_DISPOSITION_NOMEM;
} else if (serial < asoc->peer.addip_serial + 1) {
/* ADDIP 5.2 E2)
* If the value found in the Sequence Number is less than the
* ('Peer-Sequence-Number' + 1), simply skip to the next
* ASCONF, and include in the outbound response packet
* any previously cached ASCONF-ACK response that was
* sent and saved that matches the Sequence Number of the
* ASCONF. Note: It is possible that no cached ASCONF-ACK
* Chunk exists. This will occur when an older ASCONF
* arrives out of order. In such a case, the receiver
* should skip the ASCONF Chunk and not include an ASCONF-ACK
* Chunk for that chunk.
*/
asconf_ack = sctp_assoc_lookup_asconf_ack(asoc, hdr->serial);
if (!asconf_ack)
return SCTP_DISPOSITION_DISCARD;
/* Reset the transport so that we select the correct one
* this time around. This is to make sure that we don't
* accidentally use a stale transport that's been removed.
*/
asconf_ack->transport = NULL;
} else {
/* ADDIP 5.2 E5) Otherwise, the ASCONF Chunk is discarded since
* it must be either a stale packet or from an attacker.
*/
return SCTP_DISPOSITION_DISCARD;
}
/* ADDIP 5.2 E6) The destination address of the SCTP packet
* containing the ASCONF-ACK Chunks MUST be the source address of
* the SCTP packet that held the ASCONF Chunks.
*
* To do this properly, we'll set the destination address of the chunk
* and, at transmit time, try to look up the transport to use.
* Since ASCONFs may be bundled, the correct transport may not be
* created until we process the entire packet, thus this workaround.
*/
asconf_ack->dest = chunk->source;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack));
if (asoc->new_transport) {
sctp_sf_heartbeat(ep, asoc, type, asoc->new_transport, commands);
((struct sctp_association *)asoc)->new_transport = NULL;
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.3 General rules for address manipulation
* When building TLV parameters for the ASCONF Chunk that will add or
* delete IP addresses the D0 to D13 rules should be applied:
*/
sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type, void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *asconf_ack = arg;
struct sctp_chunk *last_asconf = asoc->addip_last_asconf;
struct sctp_chunk *abort;
struct sctp_paramhdr *err_param = NULL;
sctp_addiphdr_t *addip_hdr;
__u32 sent_serial, rcvd_serial;
if (!sctp_vtag_verify(asconf_ack, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* ADD-IP, Section 4.1.2:
* This chunk MUST be sent in an authenticated way by using
* the mechanism defined in [I-D.ietf-tsvwg-sctp-auth]. If this chunk
* is received unauthenticated it MUST be silently discarded as
* described in [I-D.ietf-tsvwg-sctp-auth].
*/
if (!net->sctp.addip_noauth && !asconf_ack->auth)
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
/* Make sure that the ADDIP chunk has a valid length. */
if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data;
rcvd_serial = ntohl(addip_hdr->serial);
/* Verify the ASCONF-ACK chunk before processing it. */
if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
(void *)err_param, commands);
if (last_asconf) {
addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr;
sent_serial = ntohl(addip_hdr->serial);
} else {
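/* Nothing is in flight, so the last serial actually sent is one
* below the next serial to be assigned.
*/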
sent_serial = asoc->addip_serial - 1;
}
/* D0) If an endpoint receives an ASCONF-ACK that is greater than or
* equal to the next serial number to be used but no ASCONF chunk is
* outstanding, the endpoint MUST ABORT the association. Note that one
* sequence number is "greater than" another if it is no more than
* 2^31 - 1 larger than the current sequence number (using serial
* arithmetic).
*/
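/* Rough sketch of the serial arithmetic: with sent_serial == 10, a
* rcvd_serial of 11 (or anything up to about 2^31 beyond it) passes
* ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1), so with no ASCONF
* outstanding it triggers the ABORT below.
*/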
if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) &&
!(asoc->addip_last_asconf)) {
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
if (!sctp_process_asconf_ack((struct sctp_association *)asoc,
asconf_ack)) {
/* Successfully processed ASCONF_ACK. We can
* release the next asconf if we have one.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_NEXT_ASCONF,
SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
abort = sctp_make_abort(asoc, asconf_ack,
sizeof(sctp_errhdr_t));
if (abort) {
sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, 0);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(abort));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_ASCONF_ACK));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP
*
* When a FORWARD TSN chunk arrives, the data receiver MUST first update
* its cumulative TSN point to the value carried in the FORWARD TSN
* chunk, and then MUST further advance its cumulative TSN point locally
* if possible.
* After the above processing, the data receiver MUST stop reporting any
* missing TSNs earlier than or equal to the new cumulative TSN point.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
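/* len now covers the FORWARD TSN header plus any skip entries;
* advance the skb past the whole body.
*/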
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto discard_noforce;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto discard_noforce;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
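/* Skip entries are present only if the chunk is longer than the
* fixed FORWARD TSN header; only then is there per-stream state to
* process.
*/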
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
/* FIXME: For now send a SACK, but DATA processing may
* send another.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE());
return SCTP_DISPOSITION_CONSUME;
discard_noforce:
return SCTP_DISPOSITION_DISCARD;
}
sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_fwdtsn_hdr *fwdtsn_hdr;
struct sctp_fwdtsn_skip *skip;
__u16 len;
__u32 tsn;
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the FORWARD_TSN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data;
chunk->subh.fwdtsn_hdr = fwdtsn_hdr;
len = ntohs(chunk->chunk_hdr->length);
len -= sizeof(struct sctp_chunkhdr);
skb_pull(chunk->skb, len);
tsn = ntohl(fwdtsn_hdr->new_cum_tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* The TSN is too high--silently discard the chunk and count on it
* getting retransmitted later.
*/
if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
goto gen_shutdown;
/* Silently discard the chunk if stream-id is not valid */
sctp_walk_fwdtsn(skip, chunk) {
if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
goto gen_shutdown;
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
if (len > sizeof(struct sctp_fwdtsn_hdr))
sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
SCTP_CHUNK(chunk));
/* Go ahead and force a SACK, since we are shutting down. */
gen_shutdown:
/* Implementor's Guide.
*
* While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately
* respond to each received packet containing one or more DATA chunk(s)
* with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer
*/
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* SCTP-AUTH Section 6.3 Receiving authenticated chunks
*
* The receiver MUST use the HMAC algorithm indicated in the HMAC
* Identifier field. If this algorithm was not specified by the
* receiver in the HMAC-ALGO parameter in the INIT or INIT-ACK chunk
* during association setup, the AUTH chunk and all chunks after it MUST
* be discarded and an ERROR chunk SHOULD be sent with the error cause
* defined in Section 4.1.
*
* If an endpoint with no shared key receives a Shared Key Identifier
* other than 0, it MUST silently discard all authenticated chunks. If
* the endpoint has at least one endpoint pair shared key for the peer,
* it MUST use the key specified by the Shared Key Identifier if a
* key has been configured for that Shared Key Identifier. If no
* endpoint pair shared key has been configured for that Shared Key
* Identifier, all authenticated chunks MUST be silently discarded.
*
* Verification Tag: 8.5 Verification Tag [Normal verification]
*
* The return value is the disposition of the chunk.
*/
static sctp_ierror_t sctp_sf_authenticate(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
struct sctp_hmac *hmac;
unsigned int sig_len;
__u16 key_id;
__u8 *save_digest;
__u8 *digest;
/* Pull in the auth header, so we can do some more verification */
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
chunk->subh.auth_hdr = auth_hdr;
skb_pull(chunk->skb, sizeof(struct sctp_authhdr));
/* Make sure that we support the HMAC algorithm from the auth
* chunk.
*/
if (!sctp_auth_asoc_verify_hmac_id(asoc, auth_hdr->hmac_id))
return SCTP_IERROR_AUTH_BAD_HMAC;
/* Make sure that the provided shared key identifier has been
* configured
*/
key_id = ntohs(auth_hdr->shkey_id);
if (key_id != asoc->active_key_id && !sctp_auth_get_shkey(asoc, key_id))
return SCTP_IERROR_AUTH_BAD_KEYID;
/* Make sure that the length of the signature matches what
* we expect.
*/
sig_len = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_auth_chunk_t);
hmac = sctp_auth_get_hmac(ntohs(auth_hdr->hmac_id));
if (sig_len != hmac->hmac_len)
return SCTP_IERROR_PROTO_VIOLATION;
/* Now that we've done validation checks, we can compute and
* verify the hmac. The steps involved are:
* 1. Save the digest from the chunk.
* 2. Zero out the digest in the chunk.
* 3. Compute the new digest.
* 4. Compare saved and new digests.
*/
digest = auth_hdr->hmac;
skb_pull(chunk->skb, sig_len);
save_digest = kmemdup(digest, sig_len, GFP_ATOMIC);
if (!save_digest)
goto nomem;
memset(digest, 0, sig_len);
sctp_auth_calculate_hmac(asoc, chunk->skb,
(struct sctp_auth_chunk *)chunk->chunk_hdr,
GFP_ATOMIC);
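/* sctp_auth_calculate_hmac() writes the freshly computed digest back
* into the zeroed digest field of the chunk, so comparing it against
* the saved copy tells us whether the packet was correctly signed.
*/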
/* Discard the packet if the digests do not match */
if (memcmp(save_digest, digest, sig_len)) {
kfree(save_digest);
return SCTP_IERROR_BAD_SIG;
}
kfree(save_digest);
chunk->auth = 1;
return SCTP_IERROR_NO_ERROR;
nomem:
return SCTP_IERROR_NOMEM;
}
sctp_disposition_t sctp_sf_eat_auth(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_authhdr *auth_hdr;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *err_chunk;
sctp_ierror_t error;
/* Make sure that the peer is AUTH capable */
if (!asoc->peer.auth_capable)
return sctp_sf_unk_chunk(net, ep, asoc, type, arg, commands);
if (!sctp_vtag_verify(chunk, asoc)) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
SCTP_NULL());
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
/* Make sure that the AUTH chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_auth_chunk)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
* of the packet
*/
err_chunk = sctp_make_op_error(asoc, chunk,
SCTP_ERROR_UNSUP_HMAC,
&auth_hdr->hmac_id,
sizeof(__u16), 0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Fall Through */
case SCTP_IERROR_AUTH_BAD_KEYID:
case SCTP_IERROR_BAD_SIG:
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case SCTP_IERROR_PROTO_VIOLATION:
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
case SCTP_IERROR_NOMEM:
return SCTP_DISPOSITION_NOMEM;
default: /* Prevent gcc warnings */
break;
}
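/* Authentication succeeded. If the peer signed with a key id other
* than our current active one, raise an SCTP_AUTH_NEWKEY event so
* the ULP learns which shared key the peer is now using.
*/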
if (asoc->active_key_id != ntohs(auth_hdr->shkey_id)) {
struct sctp_ulpevent *ev;
ev = sctp_ulpevent_make_authkey(asoc, ntohs(auth_hdr->shkey_id),
SCTP_AUTH_NEWKEY, GFP_ATOMIC);
if (!ev)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP,
SCTP_ULPEVENT(ev));
}
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process an unknown chunk.
*
* Section: 3.2. Also, 2.1 in the implementor's guide.
*
* Chunk Types are encoded such that the highest-order two bits specify
* the action that must be taken if the processing endpoint does not
* recognize the Chunk Type.
*
* 00 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it.
*
* 01 - Stop processing this SCTP packet and discard it, do not process
* any further chunks within it, and report the unrecognized
* chunk in an 'Unrecognized Chunk Type'.
*
* 10 - Skip this chunk and continue processing.
*
* 11 - Skip this chunk and continue processing, but report in an ERROR
* Chunk using the 'Unrecognized Chunk Type' cause of error.
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *unk_chunk = arg;
struct sctp_chunk *err_chunk;
sctp_chunkhdr_t *hdr;
pr_debug("%s: processing unknown chunk id:%d\n", __func__, type.chunk);
if (!sctp_vtag_verify(unk_chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
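/* The two high-order bits of the chunk type encode the required
* action (see the table above); mask everything else off and
* dispatch on them.
*/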
switch (type.chunk & SCTP_CID_ACTION_MASK) {
case SCTP_CID_ACTION_DISCARD:
/* Discard the packet. */
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
case SCTP_CID_ACTION_DISCARD_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Discard the packet. */
sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
return SCTP_DISPOSITION_CONSUME;
case SCTP_CID_ACTION_SKIP:
/* Skip the chunk. */
return SCTP_DISPOSITION_DISCARD;
case SCTP_CID_ACTION_SKIP_ERR:
/* Generate an ERROR chunk as response. */
hdr = unk_chunk->chunk_hdr;
err_chunk = sctp_make_op_error(asoc, unk_chunk,
SCTP_ERROR_UNKNOWN_CHUNK, hdr,
WORD_ROUND(ntohs(hdr->length)),
0);
if (err_chunk) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err_chunk));
}
/* Skip the chunk. */
return SCTP_DISPOSITION_CONSUME;
default:
break;
}
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the chunk.
*
* Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2
* [Too numerous to mention...]
* Verification Tag: No verification needed.
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_discard_chunk(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length.
* Since we don't know the chunk type, we use a general
* chunkhdr structure to make a comparison.
*/
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
pr_debug("%s: chunk:%d is discarded\n", __func__, type.chunk);
return SCTP_DISPOSITION_DISCARD;
}
/*
* Discard the whole packet.
*
* Section: 8.4 2)
*
* 2) If the OOTB packet contains an ABORT chunk, the receiver MUST
* silently discard the OOTB packet and take no further action.
*
* Verification Tag: No verification necessary
*
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_pdiscard(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
return SCTP_DISPOSITION_CONSUME;
}
/*
* The other end is violating protocol.
*
* Section: Not specified
* Verification Tag: Not specified
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (asoc, reply_msg, msg_up, timers, counters)
*
* We simply tag the chunk as a violation. The state machine will log
* the violation and continue.
*/
sctp_disposition_t sctp_sf_violation(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
/* Make sure that the chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
return SCTP_DISPOSITION_VIOLATION;
}
/*
* Common function to handle a protocol violation.
*/
static sctp_disposition_t sctp_sf_abort_violation(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
void *arg,
sctp_cmd_seq_t *commands,
const __u8 *payload,
const size_t paylen)
{
struct sctp_packet *packet = NULL;
struct sctp_chunk *chunk = arg;
struct sctp_chunk *abort = NULL;
/* SCTP-AUTH, Section 6.3:
* It should be noted that if the receiver wants to tear
* down an association in an authenticated way only, the
* handling of malformed packets should not result in
* tearing down the association.
*
* This means that if we only want to abort associations
* in an authenticated way (i.e AUTH+ABORT), then we
* can't destroy this association just because the packet
* was malformed.
*/
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_abort_violation(asoc, chunk, payload, paylen);
if (!abort)
goto nomem;
if (asoc) {
/* Treat INIT-ACK as a special case during COOKIE-WAIT. */
if (chunk->chunk_hdr->type == SCTP_CID_INIT_ACK &&
!asoc->peer.i.init_tag) {
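/* We never learned the peer's tag, so dig it out of the INIT ACK
* itself; the outgoing ABORT must carry a tag the peer will accept.
* If the INIT ACK is too short to hold one, fall back to a
* reflected ABORT (T bit set).
*/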
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
if (!sctp_chunk_length_valid(chunk,
sizeof(sctp_initack_chunk_t)))
abort->chunk_hdr->flags |= SCTP_CHUNK_FLAG_T;
else {
unsigned int inittag;
inittag = ntohl(initack->init_hdr.init_tag);
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_INITTAG,
SCTP_U32(inittag));
}
}
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
}
} else {
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (!packet)
goto nomem_pkt;
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
}
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem_pkt:
sctp_chunk_free(abort);
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Handle a protocol violation when the chunk length is invalid.
* "Invalid" length is identified as smaller than the minimal length a
* given chunk can be. For example, a SACK chunk has invalid length
* if its length is set to be smaller than the size of sctp_sack_chunk_t.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*
* Section: Not specified
* Verification Tag: Nothing to do
* Inputs
* (endpoint, asoc, chunk)
*
* Outputs
* (reply_msg, msg_up, counters)
*
* Generate an ABORT chunk and terminate the association.
*/
static sctp_disposition_t sctp_sf_violation_chunklen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk had invalid length:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/*
* Handle a protocol violation when the parameter length is invalid.
* If the length is smaller than the minimum length of a given parameter,
* or the accumulated length of multiple parameters exceeds the end of
* the chunk, the length is considered invalid.
*/
static sctp_disposition_t sctp_sf_violation_paramlen(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, void *ext,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
struct sctp_paramhdr *param = ext;
struct sctp_chunk *abort = NULL;
if (sctp_auth_recv_cid(SCTP_CID_ABORT, asoc))
goto discard;
/* Make the abort chunk. */
abort = sctp_make_violation_paramlen(asoc, chunk, param);
if (!abort)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_PROTO_VIOLATION));
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
discard:
sctp_sf_pdiscard(net, ep, asoc, SCTP_ST_CHUNK(0), arg, commands);
return SCTP_DISPOSITION_ABORT;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle a protocol violation when the peer tries to advance the
* cumulative TSN Ack to a point beyond the max TSN currently sent.
*
* We inform the other end by sending an ABORT with a Protocol Violation
* error code.
*/
static sctp_disposition_t sctp_sf_violation_ctsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The cumulative tsn ack beyond the max tsn currently sent:";
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/* Handle protocol violation of an invalid chunk bundling. For example,
* when we have an association and we receive a bundled INIT-ACK or
* SHUTDOWN-COMPLETE, our peer is clearly violating the "MUST NOT bundle"
* statement from the specs. Additionally, there might be an attacker
* on the path and we may not want to continue this communication.
*/
static sctp_disposition_t sctp_sf_violation_chunk(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
static const char err_str[] = "The following chunk violates protocol:";
if (!asoc)
return sctp_sf_violation(net, ep, asoc, type, arg, commands);
return sctp_sf_abort_violation(net, ep, asoc, arg, commands, err_str,
sizeof(err_str));
}
/***************************************************************************
* These are the state functions for handling primitive (Section 10) events.
***************************************************************************/
/*
* sctp_sf_do_prm_asoc
*
* Section: 10.1 ULP-to-SCTP
* B) Associate
*
* Format: ASSOCIATE(local SCTP instance name, destination transport addr,
* outbound stream count)
* -> association id [,destination transport addr list] [,outbound stream
* count]
*
* This primitive allows the upper layer to initiate an association to a
* specific peer endpoint.
*
* The peer endpoint shall be specified by one of the transport addresses
* which defines the endpoint (see Section 1.4). If the local SCTP
* instance has not been initialized, the ASSOCIATE is considered an
* error.
* [This is not relevant for the kernel implementation since we do all
* initialization at boot time. If we hadn't initialized we wouldn't
* get anywhere near this code.]
*
* An association id, which is a local handle to the SCTP association,
* will be returned on successful establishment of the association. If
* SCTP is not able to open an SCTP association with the peer endpoint,
* an error is returned.
* [In the kernel implementation, the struct sctp_association needs to
* be created BEFORE causing this primitive to run.]
*
* Other association parameters may be returned, including the
* complete destination transport addresses of the peer as well as the
* outbound stream count of the local endpoint. One of the transport
* address from the returned destination addresses will be selected by
* the local endpoint as default primary path for sending SCTP packets
* to this peer. The returned "destination transport addr list" can
* be used by the ULP to change the default primary path or to force
* sending a packet to a specific transport address. [All of this
* stuff happens when the INIT ACK arrives. This is a NON-BLOCKING
* function.]
*
* Mandatory attributes:
*
* o local SCTP instance name - obtained from the INITIALIZE operation.
* [This is the argument asoc.]
* o destination transport addr - specified as one of the transport
* addresses of the peer endpoint with which the association is to be
* established.
* [This is asoc->peer.active_path.]
* o outbound stream count - the number of outbound streams the ULP
* would like to open towards this peer endpoint.
* [BUG: This is not currently implemented.]
* Optional attributes:
*
* None.
*
* The return value is a disposition.
*/
sctp_disposition_t sctp_sf_do_prm_asoc(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl;
struct sctp_association *my_asoc;
/* The comment below says that we enter COOKIE-WAIT AFTER
* sending the INIT, but that doesn't actually work in our
* implementation...
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_COOKIE_WAIT));
/* RFC 2960 5.1 Normal Establishment of an Association
*
* A) "A" first sends an INIT chunk to "Z". In the INIT, "A"
* must provide its Verification Tag (Tag_A) in the Initiate
* Tag field. Tag_A SHOULD be a random number in the range of
* 1 to 4294967295 (see 5.3.1 for Tag value selection). ...
*/
repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
if (!repl)
goto nomem;
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Cast away the const modifier, as we want to just
* rerun it through as a side effect.
*/
my_asoc = (struct sctp_association *)asoc;
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(my_asoc));
/* After sending the INIT, "A" starts the T1-init timer and
* enters the COOKIE-WAIT state.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Process the SEND primitive.
*
* Section: 10.1 ULP-to-SCTP
* E) Send
*
* Format: SEND(association id, buffer address, byte count [,context]
* [,stream id] [,life time] [,destination transport address]
* [,unorder flag] [,no-bundle flag] [,payload protocol-id] )
* -> result
*
* This is the main method to send user data via SCTP.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o buffer address - the location where the user message to be
* transmitted is stored;
*
* o byte count - The size of the user data in number of bytes;
*
* Optional attributes:
*
* o context - an optional 32 bit integer that will be carried in the
* sending failure notification to the ULP if the transportation of
* this User Message fails.
*
* o stream id - to indicate which stream to send the data on. If not
* specified, stream 0 will be used.
*
* o life time - specifies the life time of the user data. The user data
* will not be sent by SCTP after the life time expires. This
* parameter can be used to avoid efforts to transmit stale
* user messages. SCTP notifies the ULP if the data cannot be
* initiated to transport (i.e. sent to the destination via SCTP's
* send primitive) within the life time variable. However, the
* user data will be transmitted if SCTP has attempted to transmit a
* chunk before the life time expired.
*
* o destination transport address - specified as one of the destination
* transport addresses of the peer endpoint to which this packet
* should be sent. Whenever possible, SCTP should use this destination
* transport address for sending the packets, instead of the current
* primary path.
*
* o unorder flag - this flag, if present, indicates that the user
* would like the data delivered in an unordered fashion to the peer
* (i.e., the U flag is set to 1 on all DATA chunks carrying this
* message).
*
* o no-bundle flag - instructs SCTP not to bundle this user data with
* other outbound DATA chunks. SCTP MAY still bundle even when
* this flag is present, when faced with network congestion.
*
* o payload protocol-id - A 32 bit unsigned integer that is to be
* passed to the peer indicating the type of payload protocol data
* being transmitted. This value is passed as opaque data by SCTP.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_prm_send(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_datamsg *msg = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_MSG, SCTP_DATAMSG(msg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Process the SHUTDOWN primitive.
*
* Section: 10.1:
* C) Shutdown
*
* Format: SHUTDOWN(association id)
* -> result
*
* Gracefully closes an association. Any locally queued user data
* will be delivered to the peer. The association will be terminated only
* after the peer acknowledges all the SCTP packets sent. A success code
* will be returned on successful termination of the association. If
* attempting to terminate the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* Optional attributes:
*
* None.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
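/* If nothing is left in the outqueue we can begin the SHUTDOWN
* handshake right away; otherwise the state machine starts it once
* all outstanding data has been acknowledged by the peer.
*/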
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands);
}
return disposition;
}
/*
* Process the ABORT primitive.
*
* Section: 10.1:
* C) Abort
*
* Format: Abort(association id [, cause code])
* -> result
*
* Ungracefully closes an association. Any locally queued user data
* will be discarded and an ABORT chunk is sent to the peer. A success code
* will be returned on successful abortion of the association. If
* attempting to abort the association results in a failure, an error
* code shall be returned.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* Optional attributes:
*
* o cause code - reason of the abort to be passed to the peer
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_1_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* From 9.1 Abort of an Association
* Upon receipt of the ABORT primitive from its upper
* layer, the endpoint enters the CLOSED state and
* discards all outstanding data. An ABORT chunk is sent
* to the peer, and the endpoint accepts no further data
* from its upper layer.
*/
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return retval;
}
/* We tried an illegal operation on an association which is closed. */
sctp_disposition_t sctp_sf_error_closed(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL));
return SCTP_DISPOSITION_CONSUME;
}
/* We tried an illegal operation on an association which is shutting
* down.
*/
sctp_disposition_t sctp_sf_error_shutdown(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR,
SCTP_ERROR(-ESHUTDOWN));
return SCTP_DISPOSITION_CONSUME;
}
/*
* sctp_cookie_wait_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues a shutdown while in COOKIE_WAIT state.
* state table when someone issues a shutdown while in COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_SHUTDOWNS);
sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());
return SCTP_DISPOSITION_DELETE_TCB;
}
/*
* sctp_cookie_echoed_prm_shutdown
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues a shutdown while in COOKIE_ECHOED state.
* state table when someone issues a shutdown while in COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg, sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* the common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_shutdown(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_cookie_wait_prm_abort
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues an abort while in COOKIE_WAIT state.
* state table when someone issues an abort while in COOKIE_WAIT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_wait_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *abort = arg;
sctp_disposition_t retval;
/* Stop T1-init timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
retval = SCTP_DISPOSITION_CONSUME;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_CLOSED));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
/* Even if we can't send the ABORT due to low memory delete the
* TCB. This is a departure from our typical NOMEM handling.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNREFUSED));
/* Delete the established association. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_USER_ABORT));
return retval;
}
/*
* sctp_sf_cookie_echoed_prm_abort
*
* Section: 4 Note: 3
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues an abort while in COOKIE_ECHOED state.
* state table when someone issues an abort while in COOKIE_ECHOED state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_cookie_echoed_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* There is a single T1 timer, so we should be able to use
* the common function with the COOKIE-WAIT state.
*/
return sctp_sf_cookie_wait_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_pending_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues an abort while in SHUTDOWN-PENDING state.
* state table when someone issues an abort while in SHUTDOWN-PENDING state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_pending_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_sent_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues an abort while in SHUTDOWN-SENT state.
* state table when someone issues an abort while in SHUTDOWN-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_sent_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* Stop the T5-shutdown guard timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
return sctp_sf_do_9_1_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* sctp_sf_shutdown_ack_sent_prm_abort
*
* Inputs
* (endpoint, asoc)
*
* The RFC does not explicitly address this issue, but this is the route through
* the state table when someone issues an abort while in SHUTDOWN-ACK-SENT state.
*
* Outputs
* (timers)
*/
sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
/* It is the same T2 timer, so we should be able to use
* the common function with the SHUTDOWN-SENT state.
*/
return sctp_sf_shutdown_sent_prm_abort(net, ep, asoc, type, arg, commands);
}
/*
* Process the REQUESTHEARTBEAT primitive
*
* 10.1 ULP-to-SCTP
* J) Request Heartbeat
*
* Format: REQUESTHEARTBEAT(association id, destination transport address)
*
* -> result
*
* Instructs the local endpoint to perform a HeartBeat on the specified
* destination transport address of the given association. The returned
* result should indicate whether the transmission of the HEARTBEAT
* chunk to the destination address is successful.
*
* Mandatory attributes:
*
* o association id - local handle to the SCTP association
*
* o destination transport address - the transport address of the
* association on which a heartbeat should be issued.
*/
sctp_disposition_t sctp_sf_do_prm_requestheartbeat(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
if (SCTP_DISPOSITION_NOMEM == sctp_sf_heartbeat(ep, asoc, type,
(struct sctp_transport *)arg, commands))
return SCTP_DISPOSITION_NOMEM;
/*
* RFC 2960 (bis), section 8.3
*
* D) Request an on-demand HEARTBEAT on a specific destination
* transport address of a given association.
*
* The endpoint should increment the respective error counter of
* the destination transport address each time a HEARTBEAT is sent
* to that address and not acknowledged within one RTO.
*
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_HB_SENT,
SCTP_TRANSPORT(arg));
return SCTP_DISPOSITION_CONSUME;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* When an endpoint has an ASCONF signaled change to be sent to the
* remote endpoint it should do A1 to A9
*/
sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = arg;
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Ignore the primitive event
*
* The return value is the disposition of the primitive.
*/
sctp_disposition_t sctp_sf_ignore_primitive(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: primitive type:%d is ignored\n", __func__,
type.primitive);
return SCTP_DISPOSITION_DISCARD;
}
/***************************************************************************
* These are the state functions for the OTHER events.
***************************************************************************/
/*
* When the SCTP stack has no more user data to send or retransmit, this
* notification is given to the user. Also, at the time when a user app
* subscribes to this event, if there is no data to be sent or
* retransmitted, the stack will immediately send up this notification.
*/
sctp_disposition_t sctp_sf_do_no_pending_tsn(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_ulpevent *event;
event = sctp_ulpevent_make_sender_dry_event(asoc, GFP_ATOMIC);
if (!event)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(event));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Start the shutdown negotiation.
*
* From Section 9.2:
* Once all its outstanding data has been acknowledged, the endpoint
* shall send a SHUTDOWN chunk to its peer including in the Cumulative
* TSN Ack field the last sequential TSN it has received from the peer.
* It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT
* state. If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply;
/* Once all its outstanding data has been acknowledged, the
* endpoint shall send a SHUTDOWN chunk to its peer including
* in the Cumulative TSN Ack field the last sequential TSN it
* has received from the peer.
*/
reply = sctp_make_shutdown(asoc, NULL);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN chunk and the timeout for the
* T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* It shall then start the T2-shutdown timer */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
/* RFC 4960 Section 9.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* and enter the SHUTDOWN-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Generate a SHUTDOWN ACK now that everything is SACK'd.
*
* From Section 9.2:
*
* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own,
* entering the SHUTDOWN-ACK-SENT state. If the timer expires, the
* endpoint must re-send the SHUTDOWN ACK.
*
* The return value is the disposition.
*/
sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = (struct sctp_chunk *) arg;
struct sctp_chunk *reply;
/* There are 2 ways of getting here:
* 1) called in response to a SHUTDOWN chunk
* 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued.
*
* For the case (2), the arg parameter is set to NULL. We need
* to check that we have a chunk before accessing its fields.
*/
if (chunk) {
if (!sctp_vtag_verify(chunk, asoc))
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
/* Make sure that the SHUTDOWN chunk has a valid length. */
if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t)))
return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
commands);
}
/* If it has no more outstanding DATA chunks, the SHUTDOWN receiver
* shall send a SHUTDOWN ACK ...
*/
reply = sctp_make_shutdown_ack(asoc, chunk);
if (!reply)
goto nomem;
/* Set the transport for the SHUTDOWN ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* and start/restart a T2-shutdown timer of its own, */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
/* Enter the SHUTDOWN-ACK-SENT state. */
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT));
/* sctp-implguide 2.10 Issues with Heartbeating and failover
*
* HEARTBEAT ... is discontinued after sending either SHUTDOWN
* or SHUTDOWN-ACK.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* Ignore the event defined as other
*
* The return value is the disposition of the event.
*/
sctp_disposition_t sctp_sf_ignore_other(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: the event other type:%d is ignored\n",
__func__, type.other);
return SCTP_DISPOSITION_DISCARD;
}
/************************************************************
* These are the state functions for handling timeout events.
************************************************************/
/*
* RTX Timeout
*
* Section: 6.3.3 Handle T3-rtx Expiration
*
* Whenever the retransmission timer T3-rtx expires for a destination
* address, do the following:
* [See below]
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_do_6_3_3_rtx(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_transport *transport = arg;
SCTP_INC_STATS(net, SCTP_MIB_T3_RTX_EXPIREDS);
if (asoc->overall_error_count >= asoc->max_retrans) {
if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING) {
/*
* We are here likely because the receiver had its rwnd
* closed for a while and we have not been able to
* transmit the locally queued data within the maximum
* retransmission attempts limit. Start the T5
* shutdown guard timer to give the receiver one last
* chance and some additional time to recover before
* aborting.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START_ONCE,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
}
/* E1) For the destination address for which the timer
* expires, adjust its ssthresh with rules defined in Section
* 7.2.3 and set the cwnd <- MTU.
*/
/* E2) For the destination address for which the timer
* expires, set RTO <- RTO * 2 ("back off the timer"). The
* maximum value discussed in rule C7 above (RTO.max) may be
* used to provide an upper bound to this doubling operation.
*/
/* E3) Determine how many of the earliest (i.e., lowest TSN)
* outstanding DATA chunks for the address for which the
* T3-rtx has expired will fit into a single packet, subject
* to the MTU constraint for the path corresponding to the
* destination transport address to which the retransmission
* is being sent (this may be different from the address for
* which the timer expires [see Section 6.4]). Call this
* value K. Bundle and retransmit those K DATA chunks in a
* single packet to the destination endpoint.
*
* Note: Any DATA chunks that were sent to the address for
* which the T3-rtx timer expired but did not fit in one MTU
* (rule E3 above), should be marked for retransmission and
* sent as soon as cwnd allows (normally when a SACK arrives).
*/
/* Do some failure management (Section 8.2). */
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
/* NB: Rules E4 and F1 are implicit in R1. */
sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport));
return SCTP_DISPOSITION_CONSUME;
}
/*
* Generate delayed SACK on timeout
*
* Section: 6.2 Acknowledgement on Reception of DATA Chunks
*
* The guidelines on delayed acknowledgement algorithm specified in
* Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an
* acknowledgement SHOULD be generated for at least every second packet
* (not every second DATA chunk) received, and SHOULD be generated
* within 200 ms of the arrival of any unacknowledged DATA chunk. In
* some situations it may be beneficial for an SCTP transmitter to be
* more conservative than the algorithms detailed in this document
* allow. However, an SCTP transmitter MUST NOT be more aggressive than
* the following algorithms allow.
*/
sctp_disposition_t sctp_sf_do_6_2_sack(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
SCTP_INC_STATS(net, SCTP_MIB_DELAY_SACK_EXPIREDS);
sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE());
return SCTP_DISPOSITION_CONSUME;
}
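/* Illustrative userland sketch (not part of this file): the delayed SACK
 * behaviour forced above can be tuned per association with the
 * SCTP_DELAYED_SACK socket option (RFC 6458); field names below follow
 * that API. A minimal example, assuming socket `fd` and an existing
 * association id `assoc_id`:
 *
 *	struct sctp_sack_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.sack_assoc_id = assoc_id;
 *	info.sack_delay = 100;		(delay in milliseconds)
 *	info.sack_freq = 2;		(SACK at least every 2nd packet)
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_DELAYED_SACK, &info, sizeof(info));
 */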
/*
* sctp_sf_t1_init_timer_expire
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 2) If the T1-init timer expires, the endpoint MUST retransmit INIT
* and re-start the T1-init timer without changing state. This MUST
* be repeated up to 'Max.Init.Retransmits' times. After that, the
* endpoint MUST abort the initialization process and report the
* error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_init_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
struct sctp_bind_addr *bp;
int attempts = asoc->init_err_counter + 1;
pr_debug("%s: timer T1 expired (INIT)\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T1_INIT_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
bp = (struct sctp_bind_addr *) &asoc->base.bind_addr;
repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
/* Choose transport for INIT. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a sideeffect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
pr_debug("%s: giving up on INIT, attempts:%d "
"max_init_attempts:%d\n", __func__, attempts,
asoc->max_init_attempts);
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
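/* Illustrative userland sketch (not part of this file): the
 * max_init_attempts bound checked above comes from the application's
 * SCTP_INITMSG settings. A minimal example, assuming socket `fd`:
 *
 *	struct sctp_initmsg initmsg;
 *
 *	memset(&initmsg, 0, sizeof(initmsg));
 *	initmsg.sinit_max_attempts = 5;		(retransmit INIT at most 5 times)
 *	initmsg.sinit_max_init_timeo = 30000;	(cap the INIT timeout at 30 seconds, in ms)
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_INITMSG, &initmsg, sizeof(initmsg));
 */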
/*
* sctp_sf_t1_cookie_timer_expire
*
* Section: 4 Note: 2
* Verification Tag:
* Inputs
* (endpoint, asoc)
*
* RFC 2960 Section 4 Notes
* 3) If the T1-cookie timer expires, the endpoint MUST retransmit
* COOKIE ECHO and re-start the T1-cookie timer without changing
* state. This MUST be repeated up to 'Max.Init.Retransmits' times.
* After that, the endpoint MUST abort the initialization process and
* report the error to SCTP user.
*
* Outputs
* (timers, events)
*
*/
sctp_disposition_t sctp_sf_t1_cookie_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *repl = NULL;
int attempts = asoc->init_err_counter + 1;
pr_debug("%s: timer T1 expired (COOKIE-ECHO)\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T1_COOKIE_EXPIREDS);
if (attempts <= asoc->max_init_attempts) {
repl = sctp_make_cookie_echo(asoc, NULL);
if (!repl)
return SCTP_DISPOSITION_NOMEM;
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_CHOOSE_TRANSPORT,
SCTP_CHUNK(repl));
/* Issue a sideeffect to do the needed accounting. */
sctp_add_cmd_sf(commands, SCTP_CMD_COOKIEECHO_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl));
} else {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
return SCTP_DISPOSITION_DELETE_TCB;
}
return SCTP_DISPOSITION_CONSUME;
}
/* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN
* with the updated last sequential TSN received from its peer.
*
* An endpoint should limit the number of retransmissions of the
* SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'.
* If this threshold is exceeded the endpoint should destroy the TCB and
* MUST report the peer endpoint unreachable to the upper layer (and
* thus the association enters the CLOSED state). The reception of any
* packet from its peer (i.e. as the peer sends all of its queued DATA
* chunks) should clear the endpoint's retransmission count and restart
* the T2-Shutdown timer, giving its peer ample opportunity to transmit
* all of its queued DATA chunks that have not yet been sent.
*/
sctp_disposition_t sctp_sf_t2_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
pr_debug("%s: timer T2 expired\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T2_SHUTDOWN_EXPIREDS);
((struct sctp_association *)asoc)->shutdown_retries++;
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
/* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
}
switch (asoc->state) {
case SCTP_STATE_SHUTDOWN_SENT:
reply = sctp_make_shutdown(asoc, NULL);
break;
case SCTP_STATE_SHUTDOWN_ACK_SENT:
reply = sctp_make_shutdown_ack(asoc, NULL);
break;
default:
BUG();
break;
}
if (!reply)
goto nomem;
/* Do some failure management (Section 8.2).
* If the transport the SHUTDOWN was last sent to has been removed,
* don't do failure management.
*/
if (asoc->shutdown_last_sent_to)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(asoc->shutdown_last_sent_to));
/* Set the transport for the SHUTDOWN/ACK chunk and the timeout for
* the T2-shutdown timer.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply));
/* Restart the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
return SCTP_DISPOSITION_CONSUME;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/*
* ADDIP Section 4.1 ASCONF Chunk Procedures
* If the T4 RTO timer expires the endpoint should do B1 to B5
*/
sctp_disposition_t sctp_sf_t4_timer_expire(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *chunk = asoc->addip_last_asconf;
struct sctp_transport *transport = chunk->transport;
SCTP_INC_STATS(net, SCTP_MIB_T4_RTO_EXPIREDS);
/* ADDIP 4.1 B1) Increment the error counters and perform path failure
* detection on the appropriate destination address as defined in
* RFC2960 [5] section 8.1 and 8.2.
*/
if (transport)
sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE,
SCTP_TRANSPORT(transport));
/* Reconfig T4 timer and transport. */
sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk));
/* ADDIP 4.1 B2) Increment the association error counters and perform
* endpoint failure detection on the association as defined in
* RFC2960 [5] section 8.1 and 8.2.
* association error counter is incremented in SCTP_CMD_STRIKE.
*/
if (asoc->overall_error_count >= asoc->max_retrans) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_ABORT;
}
/* ADDIP 4.1 B3) Back-off the destination address RTO value to which
* the ASCONF chunk was sent by doubling the RTO timer value.
* This is done in SCTP_CMD_STRIKE.
*/
/* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible
* choose an alternate destination address (please refer to RFC2960
* [5] section 6.4.1). An endpoint MUST NOT add new parameters to this
* chunk, it MUST be the same (including its serial number) as the last
* ASCONF sent.
*/
sctp_chunk_hold(asoc->addip_last_asconf);
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(asoc->addip_last_asconf));
/* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different
* destination is selected, then the RTO used will be that of the new
* destination address.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO));
return SCTP_DISPOSITION_CONSUME;
}
/* sctpimpguide-05 Section 2.12.2
* The sender of the SHUTDOWN MAY also start an overall guard timer
* 'T5-shutdown-guard' to bound the overall time for shutdown sequence.
* At the expiration of this timer the sender SHOULD abort the association
* by sending an ABORT chunk.
*/
sctp_disposition_t sctp_sf_t5_timer_expire(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
struct sctp_chunk *reply = NULL;
pr_debug("%s: timer T5 expired\n", __func__);
SCTP_INC_STATS(net, SCTP_MIB_T5_SHUTDOWN_GUARD_EXPIREDS);
reply = sctp_make_abort(asoc, NULL, 0);
if (!reply)
goto nomem;
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply));
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ETIMEDOUT));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_ERROR));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_DISPOSITION_DELETE_TCB;
nomem:
return SCTP_DISPOSITION_NOMEM;
}
/* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires,
* the association is automatically closed by starting the shutdown process.
* The work that needs to be done is the same as when SHUTDOWN is initiated
* by the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown().
*/
sctp_disposition_t sctp_sf_autoclose_timer_expire(
struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
int disposition;
SCTP_INC_STATS(net, SCTP_MIB_AUTOCLOSE_EXPIREDS);
/* From 9.2 Shutdown of an Association
* Upon receipt of the SHUTDOWN primitive from its upper
* layer, the endpoint enters SHUTDOWN-PENDING state and
* remains there until all outstanding data has been
* acknowledged by its peer. The endpoint accepts no new data
* from its upper layer, but retransmits data to the far end
* if necessary to fill gaps.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING));
disposition = SCTP_DISPOSITION_CONSUME;
if (sctp_outq_is_empty(&asoc->outqueue)) {
disposition = sctp_sf_do_9_2_start_shutdown(net, ep, asoc, type,
arg, commands);
}
return disposition;
}
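/* Illustrative userland sketch (not part of this file): the autoclose
 * timer handled above is armed with the SCTP_AUTOCLOSE socket option on
 * one-to-many style sockets. A minimal example, assuming socket `fd`:
 *
 *	int secs = 120;		(close idle associations after two minutes)
 *
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_AUTOCLOSE, &secs, sizeof(secs));
 *
 * A value of 0 (the default) disables the automatic close.
 */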
/*****************************************************************************
* These are sa state functions which could apply to all types of events.
****************************************************************************/
/*
* This table entry is not implemented.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_not_impl(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_NOT_IMPL;
}
/*
* This table entry represents a bug.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_bug(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
return SCTP_DISPOSITION_BUG;
}
/*
* This table entry represents the firing of a timer in the wrong state.
* Since timer deletion cannot be guaranteed a timer 'may' end up firing
* when the association is in the wrong state. This event should
* be ignored, so as to prevent any rearming of the timer.
*
* Inputs
* (endpoint, asoc, chunk)
*
* The return value is the disposition of the chunk.
*/
sctp_disposition_t sctp_sf_timer_ignore(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const sctp_subtype_t type,
void *arg,
sctp_cmd_seq_t *commands)
{
pr_debug("%s: timer %d ignored\n", __func__, type.chunk);
return SCTP_DISPOSITION_CONSUME;
}
/********************************************************************
* 2nd Level Abstractions
********************************************************************/
/* Pull the SACK chunk based on the SACK header. */
static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk)
{
struct sctp_sackhdr *sack;
unsigned int len;
__u16 num_blocks;
__u16 num_dup_tsns;
/* Protect ourselves from reading too far into
* the skb from a bogus sender.
*/
sack = (struct sctp_sackhdr *) chunk->skb->data;
num_blocks = ntohs(sack->num_gap_ack_blocks);
num_dup_tsns = ntohs(sack->num_dup_tsns);
len = sizeof(struct sctp_sackhdr);
len += (num_blocks + num_dup_tsns) * sizeof(__u32);
if (len > chunk->skb->len)
return NULL;
skb_pull(chunk->skb, len);
return sack;
}
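/* Worked example of the bounds check above (header values are
 * hypothetical): a SACK advertising num_gap_ack_blocks = 3 and
 * num_dup_tsns = 2 must carry
 *
 *	len = sizeof(struct sctp_sackhdr) + (3 + 2) * sizeof(__u32)
 *	    = 12 + 20 = 32 bytes
 *
 * of chunk payload. If the skb holds fewer than 32 bytes, the counts
 * were forged or truncated and sctp_sm_pull_sack() returns NULL.
 */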
/* Create an ABORT packet to be sent as a response, with the specified
* error causes.
*/
static struct sctp_packet *sctp_abort_pkt_new(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
struct sctp_chunk *chunk,
const void *payload,
size_t paylen)
{
struct sctp_packet *packet;
struct sctp_chunk *abort;
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
/* Make an ABORT.
* The T bit will be set if the asoc is NULL.
*/
abort = sctp_make_abort(asoc, chunk, paylen);
if (!abort) {
sctp_ootb_pkt_free(packet);
return NULL;
}
/* Reflect vtag if T-Bit is set */
if (sctp_test_T_bit(abort))
packet->vtag = ntohl(chunk->sctp_hdr->vtag);
/* Add specified error causes, i.e., payload, to the
* end of the chunk.
*/
sctp_addto_chunk(abort, paylen, payload);
/* Set the skb to the belonging sock for accounting. */
abort->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, abort);
}
return packet;
}
/* Allocate a packet for responding in the OOTB conditions. */
static struct sctp_packet *sctp_ootb_pkt_new(struct net *net,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk)
{
struct sctp_packet *packet;
struct sctp_transport *transport;
__u16 sport;
__u16 dport;
__u32 vtag;
/* Get the source and destination port from the inbound packet. */
sport = ntohs(chunk->sctp_hdr->dest);
dport = ntohs(chunk->sctp_hdr->source);
/* The V-tag is going to be the same as the inbound packet if no
* association exists, otherwise, use the peer's vtag.
*/
if (asoc) {
/* Special case the INIT-ACK as there is no peer's vtag
* yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT_ACK:
{
sctp_initack_chunk_t *initack;
initack = (sctp_initack_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(initack->init_hdr.init_tag);
break;
}
default:
vtag = asoc->peer.i.init_tag;
break;
}
} else {
/* Special case the INIT and stale COOKIE_ECHO as there is no
* vtag yet.
*/
switch (chunk->chunk_hdr->type) {
case SCTP_CID_INIT:
{
sctp_init_chunk_t *init;
init = (sctp_init_chunk_t *)chunk->chunk_hdr;
vtag = ntohl(init->init_hdr.init_tag);
break;
}
default:
vtag = ntohl(chunk->sctp_hdr->vtag);
break;
}
}
/* Make a transport for the bucket, Eliza... */
transport = sctp_transport_new(net, sctp_source(chunk), GFP_ATOMIC);
if (!transport)
goto nomem;
/* Cache a route for the transport with the chunk's destination as
* the source address.
*/
sctp_transport_route(transport, (union sctp_addr *)&chunk->dest,
sctp_sk(net->sctp.ctl_sock));
packet = sctp_packet_init(&transport->packet, transport, sport, dport);
packet = sctp_packet_config(packet, vtag, 0);
return packet;
nomem:
return NULL;
}
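/* Worked example of the vtag selection above (tag value is
 * hypothetical): an OOTB packet carrying an INIT with initiate tag
 * 0x12345678 gets a reply whose verification tag is 0x12345678, since
 * the sender has already announced the tag it expects; for other OOTB
 * chunk types the inbound packet's own vtag is simply reflected back
 * (RFC 4960, Section 8.4).
 */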
/* Free the packet allocated earlier for responding in the OOTB condition. */
void sctp_ootb_pkt_free(struct sctp_packet *packet)
{
sctp_transport_free(packet->transport);
}
/* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */
static void sctp_send_stale_cookie_err(struct net *net,
const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands,
struct sctp_chunk *err_chunk)
{
struct sctp_packet *packet;
if (err_chunk) {
packet = sctp_ootb_pkt_new(net, asoc, chunk);
if (packet) {
struct sctp_signed_cookie *cookie;
/* Override the OOTB vtag from the cookie. */
cookie = chunk->subh.cookie_hdr;
packet->vtag = cookie->c.peer_vtag;
/* Set the skb to the belonging sock for accounting. */
err_chunk->skb->sk = ep->base.sk;
sctp_packet_append_chunk(packet, err_chunk);
sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT,
SCTP_PACKET(packet));
SCTP_INC_STATS(net, SCTP_MIB_OUTCTRLCHUNKS);
} else
sctp_chunk_free (err_chunk);
}
}
/* Process a data chunk */
static int sctp_eat_data(const struct sctp_association *asoc,
struct sctp_chunk *chunk,
sctp_cmd_seq_t *commands)
{
sctp_datahdr_t *data_hdr;
struct sctp_chunk *err;
size_t datalen;
sctp_verb_t deliver;
int tmp;
__u32 tsn;
struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
struct sock *sk = asoc->base.sk;
struct net *net = sock_net(sk);
u16 ssn;
u16 sid;
u8 ordered = 0;
data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data;
skb_pull(chunk->skb, sizeof(sctp_datahdr_t));
tsn = ntohl(data_hdr->tsn);
pr_debug("%s: TSN 0x%x\n", __func__, tsn);
/* ASSERT: Now skb->data is really the user data. */
/* Process ECN based congestion.
*
* Since the chunk structure is reused for all chunks within
* a packet, we use ecn_ce_done to track if we've already
* done CE processing for this packet.
*
* We need to do ECN processing even if we plan to discard the
* chunk later.
*/
if (!chunk->ecn_ce_done) {
struct sctp_af *af;
chunk->ecn_ce_done = 1;
af = sctp_get_af_specific(
ipver2af(ip_hdr(chunk->skb)->version));
if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) {
/* Do real work as sideffect. */
sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE,
SCTP_U32(tsn));
}
}
tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
if (tmp < 0) {
/* The TSN is too high--silently discard the chunk and
* count on it getting retransmitted later.
*/
if (chunk->asoc)
chunk->asoc->stats.outofseqtsns++;
return SCTP_IERROR_HIGH_TSN;
} else if (tmp > 0) {
/* This is a duplicate. Record it. */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn));
return SCTP_IERROR_DUP_TSN;
}
/* This is a new TSN. */
/* Discard if there is no room in the receive window.
* Actually, allow a little bit of overflow (up to a MTU).
*/
datalen = ntohs(chunk->chunk_hdr->length);
datalen -= sizeof(sctp_data_chunk_t);
deliver = SCTP_CMD_CHUNK_ULP;
/* Think about partial delivery. */
if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) {
/* Even if we don't accept this chunk there is
* memory pressure.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL());
}
/* Spill over rwnd a little bit. Note: While allowed, this spill over
* seems a bit troublesome in that frag_point varies based on
* PMTU. In cases, such as loopback, this might be a rather
* large spill over.
*/
if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
(datalen > asoc->rwnd + asoc->frag_point))) {
/* If this is the next TSN, consider reneging to make
* room. Note: Playing nice with a confused sender. A
* malicious sender can still eat up all our buffer
* space and in the future we may want to detect and
* do more drastic reneging.
*/
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
pr_debug("%s: reneging for tsn:%u\n", __func__, tsn);
deliver = SCTP_CMD_RENEGE;
} else {
pr_debug("%s: discard tsn:%u len:%zu, rwnd:%d\n",
__func__, tsn, datalen, asoc->rwnd);
return SCTP_IERROR_IGNORE_TSN;
}
}
/*
* Also try to renege to limit our memory usage in the event that
* we are under memory pressure.
* If we can't renege, don't worry about it: the sk_rmem_schedule
* in sctp_ulpevent_make_rcvmsg will drop the frame if we grow our
* memory usage too much.
*/
if (*sk->sk_prot_creator->memory_pressure) {
if (sctp_tsnmap_has_gap(map) &&
(sctp_tsnmap_get_ctsn(map) + 1) == tsn) {
pr_debug("%s: under pressure, reneging for tsn:%u\n",
__func__, tsn);
deliver = SCTP_CMD_RENEGE;
}
}
/*
* Section 3.3.10.9 No User Data (9)
*
* Cause of error
* ---------------
* No User Data: This error cause is returned to the originator of a
* DATA chunk if a received DATA chunk has no user data.
*/
if (unlikely(0 == datalen)) {
err = sctp_make_abort_no_data(asoc, chunk, tsn);
if (err) {
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
}
/* We are going to ABORT, so we might as well stop
* processing the rest of the chunks in the packet.
*/
sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL());
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
SCTP_ERROR(ECONNABORTED));
sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
SCTP_PERR(SCTP_ERROR_NO_DATA));
SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
return SCTP_IERROR_NO_DATA;
}
chunk->data_accepted = 1;
/* Note: These statistics may overcount: a chunk counted here may later
* be dropped, or may be counted twice if we renege and it arrives again.
*/
if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
SCTP_INC_STATS(net, SCTP_MIB_INUNORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iuodchunks++;
} else {
SCTP_INC_STATS(net, SCTP_MIB_INORDERCHUNKS);
if (chunk->asoc)
chunk->asoc->stats.iodchunks++;
ordered = 1;
}
/* RFC 2960 6.5 Stream Identifier and Stream Sequence Number
*
* If an endpoint receives a DATA chunk with an invalid stream
* identifier, it shall acknowledge the reception of the DATA chunk
* following the normal procedure, immediately send an ERROR chunk
* with cause set to "Invalid Stream Identifier" (See Section 3.3.10)
* and discard the DATA chunk.
*/
sid = ntohs(data_hdr->stream);
if (sid >= asoc->c.sinit_max_instreams) {
/* Mark tsn as received even though we drop it */
sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn));
err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM,
&data_hdr->stream,
sizeof(data_hdr->stream),
sizeof(u16));
if (err)
sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
SCTP_CHUNK(err));
return SCTP_IERROR_BAD_STREAM;
}
/* Check to see if the SSN is possible for this TSN.
* The biggest gap we can record is 4K wide. Since SSNs wrap
* at an unsigned short, there is no way that an SSN can
* wrap and for a valid TSN. We can simply check if the current
* SSN is smaller then the next expected one. If it is, it wrapped
* and is invalid.
*/
ssn = ntohs(data_hdr->ssn);
if (ordered && SSN_lt(ssn, sctp_ssn_peek(&asoc->ssnmap->in, sid))) {
return SCTP_IERROR_PROTO_VIOLATION;
}
/* Send the data up to the user. Note: Schedule the
* SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK
* chunk needs the updated rwnd.
*/
sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk));
return SCTP_IERROR_NO_ERROR;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_2166_2 |
crossvul-cpp_data_bad_3486_9 | /*
* Handle unaligned accesses by emulation.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
* Copyright (C) 1999 Silicon Graphics, Inc.
*
* This file contains exception handler for address error exception with the
* special capability to execute faulting instructions in software. The
* handler does not try to handle the case when the program counter points
* to an address not aligned to a word boundary.
*
* Putting data to unaligned addresses is a bad practice even on Intel where
* only the performance is affected. Much worse is that such code is non-
* portable. Because several programs die on MIPS from alignment
* problems, I decided to implement this handler anyway, though I originally
* didn't intend to do this at all for user code.
*
* For now I enable fixing of address errors by default to make life easier.
* I intend, however, to disable this at some point in the future when the alignment
* problems with user programs have been fixed. For programmers this is the
* right way to go.
*
* Fixing address errors is a per process option. The option is inherited
* across fork(2) and execve(2) calls. If you really want to use the
* option in your user programs - I discourage the use of the software
* emulation strongly - use the following code in your userland stuff:
*
* #include <sys/sysmips.h>
*
* ...
* sysmips(MIPS_FIXADE, x);
* ...
*
* The argument x is 0 for disabling software emulation, enabled otherwise.
*
* Below a little program to play around with this feature.
*
* #include <stdio.h>
* #include <sys/sysmips.h>
*
* struct foo {
* unsigned char bar[8];
* };
*
* main(int argc, char *argv[])
* {
* struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
* unsigned int *p = (unsigned int *) (x.bar + 3);
* int i;
*
* if (argc > 1)
* sysmips(MIPS_FIXADE, atoi(argv[1]));
*
* printf("*p = %08lx\n", *p);
*
* *p = 0xdeadface;
*
* for(i = 0; i <= 7; i++)
* printf("%02x ", x.bar[i]);
* printf("\n");
* }
*
* Coprocessor loads are not supported; I think this case is unimportant
* in practice.
*
* TODO: Handle ndc (attempted store to doubleword in uncached memory)
* exception for the R6000.
* A store crossing a page boundary might be executed only partially.
* Undo the partial store in this case.
*/
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>
#include <asm/asm.h>
#include <asm/branch.h>
#include <asm/byteorder.h>
#include <asm/cop2.h>
#include <asm/inst.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#define STR(x) __STR(x)
#define __STR(x) #x
enum {
UNALIGNED_ACTION_QUIET,
UNALIGNED_ACTION_SIGNAL,
UNALIGNED_ACTION_SHOW,
};
#ifdef CONFIG_DEBUG_FS
static u32 unaligned_instructions;
static u32 unaligned_action;
#else
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
static void emulate_load_store_insn(struct pt_regs *regs,
void __user *addr, unsigned int __user *pc)
{
union mips_instruction insn;
unsigned long value;
unsigned int res;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, 0, regs, 0);
/*
* This load never faults.
*/
__get_user(insn.word, pc);
switch (insn.i_format.opcode) {
/*
* These are instructions that a compiler doesn't generate. We
* can assume therefore that the code is MIPS-aware and
* really buggy. Emulating these instructions would break the
* semantics anyway.
*/
case ll_op:
case lld_op:
case sc_op:
case scd_op:
/*
* For these instructions the only way to create an address
* error is an attempted access to kernel/supervisor address
* space.
*/
case ldl_op:
case ldr_op:
case lwl_op:
case lwr_op:
case sdl_op:
case sdr_op:
case swl_op:
case swr_op:
case lb_op:
case lbu_op:
case sb_op:
goto sigbus;
/*
* The remaining opcodes are the ones that are really of interest.
*/
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlb\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlb\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lw_op:
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lhu_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
__asm__ __volatile__ (
".set\tnoat\n"
#ifdef __BIG_ENDIAN
"1:\tlbu\t%0, 0(%2)\n"
"2:\tlbu\t$1, 1(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlbu\t%0, 1(%2)\n"
"2:\tlbu\t$1, 0(%2)\n\t"
#endif
"sll\t%0, 0x8\n\t"
"or\t%0, $1\n\t"
"li\t%1, 0\n"
"3:\t.set\tat\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
case lwu_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tlwl\t%0, (%2)\n"
"2:\tlwr\t%0, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tlwl\t%0, 3(%2)\n"
"2:\tlwr\t%0, (%2)\n\t"
#endif
"dsll\t%0, %0, 32\n\t"
"dsrl\t%0, %0, 32\n\t"
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case ld_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_READ, addr, 8))
goto sigbus;
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tldl\t%0, (%2)\n"
"2:\tldr\t%0, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tldl\t%0, 7(%2)\n"
"2:\tldr\t%0, (%2)\n\t"
#endif
"li\t%1, 0\n"
"3:\t.section\t.fixup,\"ax\"\n\t"
"4:\tli\t%1, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=&r" (value), "=r" (res)
: "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
regs->regs[insn.i_format.rt] = value;
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case sh_op:
if (!access_ok(VERIFY_WRITE, addr, 2))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 1(%2)\n\t"
"srl\t$1, %1, 0x8\n"
"2:\tsb\t$1, 0(%2)\n\t"
".set\tat\n\t"
#endif
#ifdef __LITTLE_ENDIAN
".set\tnoat\n"
"1:\tsb\t%1, 0(%2)\n\t"
"srl\t$1,%1, 0x8\n"
"2:\tsb\t$1, 1(%2)\n\t"
".set\tat\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sw_op:
if (!access_ok(VERIFY_WRITE, addr, 4))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tswl\t%1,(%2)\n"
"2:\tswr\t%1, 3(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tswl\t%1, 3(%2)\n"
"2:\tswr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
case sd_op:
#ifdef CONFIG_64BIT
/*
* A 32-bit kernel might be running on a 64-bit processor. But
* if we're on a 32-bit processor and an i-cache incoherency
* or race makes us see a 64-bit instruction here the sdl/sdr
* would blow up, so for now we don't handle unaligned 64-bit
* instructions on 32-bit kernels.
*/
if (!access_ok(VERIFY_WRITE, addr, 8))
goto sigbus;
value = regs->regs[insn.i_format.rt];
__asm__ __volatile__ (
#ifdef __BIG_ENDIAN
"1:\tsdl\t%1,(%2)\n"
"2:\tsdr\t%1, 7(%2)\n\t"
#endif
#ifdef __LITTLE_ENDIAN
"1:\tsdl\t%1, 7(%2)\n"
"2:\tsdr\t%1, (%2)\n\t"
#endif
"li\t%0, 0\n"
"3:\n\t"
".section\t.fixup,\"ax\"\n\t"
"4:\tli\t%0, %3\n\t"
"j\t3b\n\t"
".previous\n\t"
".section\t__ex_table,\"a\"\n\t"
STR(PTR)"\t1b, 4b\n\t"
STR(PTR)"\t2b, 4b\n\t"
".previous"
: "=r" (res)
: "r" (value), "r" (addr), "i" (-EFAULT));
if (res)
goto fault;
compute_return_epc(regs);
break;
#endif /* CONFIG_64BIT */
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
case lwc1_op:
case ldc1_op:
case swc1_op:
case sdc1_op:
/*
* I herewith declare: this does not happen. So send SIGBUS.
*/
goto sigbus;
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
* whatever they have to do, including possible sending of signals.
*/
case lwc2_op:
cu2_notifier_call_chain(CU2_LWC2_OP, regs);
break;
case ldc2_op:
cu2_notifier_call_chain(CU2_LDC2_OP, regs);
break;
case swc2_op:
cu2_notifier_call_chain(CU2_SWC2_OP, regs);
break;
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
default:
/*
* Pheeee... We encountered an as yet unknown instruction or
* cache coherence problem. Die sucker, die ...
*/
goto sigill;
}
#ifdef CONFIG_DEBUG_FS
unaligned_instructions++;
#endif
return;
fault:
/* Did we have an exception handler installed? */
if (fixup_exception(regs))
return;
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGSEGV, current);
return;
sigbus:
die_if_kernel("Unhandled kernel unaligned access", regs);
force_sig(SIGBUS, current);
return;
sigill:
die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
force_sig(SIGILL, current);
}
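/*
 * Worked example of the lwl/lwr pairs used above (big-endian byte
 * order, hypothetical values): for an unaligned word at address
 * A = 0x1001 with memory bytes 0x11 0x22 0x33 0x44 starting at A,
 *
 *	lwl $t0, (A)	loads 0x11 0x22 0x33 into the upper three bytes
 *	lwr $t0, 3(A)	merges 0x44 into the low byte
 *
 * leaving $t0 = 0x11223344. Each instruction touches only bytes inside
 * its own aligned word, so the pair never takes an address error itself.
 */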
asmlinkage void do_ade(struct pt_regs *regs)
{
unsigned int __user *pc;
mm_segment_t seg;
perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
1, 0, regs, regs->cp0_badvaddr);
/*
* Did we catch a fault trying to load an instruction?
* Or are we running in MIPS16 mode?
*/
if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1))
goto sigbus;
pc = (unsigned int __user *) exception_epc(regs);
if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
goto sigbus;
if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
goto sigbus;
else if (unaligned_action == UNALIGNED_ACTION_SHOW)
show_registers(regs);
/*
* Do branch emulation only if we didn't forward the exception.
* This is all so ugly ...
*/
seg = get_fs();
if (!user_mode(regs))
set_fs(KERNEL_DS);
emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
set_fs(seg);
return;
sigbus:
die_if_kernel("Kernel unaligned instruction access", regs);
force_sig(SIGBUS, current);
/*
* XXX On return from the signal handler we should advance the epc
*/
}
#ifdef CONFIG_DEBUG_FS
extern struct dentry *mips_debugfs_dir;
static int __init debugfs_unaligned(void)
{
struct dentry *d;
if (!mips_debugfs_dir)
return -ENODEV;
d = debugfs_create_u32("unaligned_instructions", S_IRUGO,
mips_debugfs_dir, &unaligned_instructions);
if (!d)
return -ENOMEM;
d = debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
mips_debugfs_dir, &unaligned_action);
if (!d)
return -ENOMEM;
return 0;
}
__initcall(debugfs_unaligned);
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_9 |
crossvul-cpp_data_good_905_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% BBBB M M PPPP %
% B B MM MM P P %
% BBBB M M M PPPP %
% B B M M P %
% BBBB M M P %
% %
% %
% Read/Write Microsoft Windows Bitmap Image Format %
% %
% Software Design %
% Cristy %
% Glenn Randers-Pehrson %
% December 2001 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/colormap-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/static.h"
#include "MagickCore/string_.h"
#include "MagickCore/module.h"
#include "MagickCore/transform.h"
/*
Macro definitions (from Windows wingdi.h).
*/
#undef BI_JPEG
#define BI_JPEG 4
#undef BI_PNG
#define BI_PNG 5
#if !defined(MAGICKCORE_WINDOWS_SUPPORT) || defined(__MINGW32__)
#undef BI_RGB
#define BI_RGB 0
#undef BI_RLE8
#define BI_RLE8 1
#undef BI_RLE4
#define BI_RLE4 2
#undef BI_BITFIELDS
#define BI_BITFIELDS 3
#undef LCS_CALIBRATED_RBG
#define LCS_CALIBRATED_RBG 0
#undef LCS_sRGB
#define LCS_sRGB 1
#undef LCS_WINDOWS_COLOR_SPACE
#define LCS_WINDOWS_COLOR_SPACE 2
#undef PROFILE_LINKED
#define PROFILE_LINKED 3
#undef PROFILE_EMBEDDED
#define PROFILE_EMBEDDED 4
#undef LCS_GM_BUSINESS
#define LCS_GM_BUSINESS 1 /* Saturation */
#undef LCS_GM_GRAPHICS
#define LCS_GM_GRAPHICS 2 /* Relative */
#undef LCS_GM_IMAGES
#define LCS_GM_IMAGES 4 /* Perceptual */
#undef LCS_GM_ABS_COLORIMETRIC
#define LCS_GM_ABS_COLORIMETRIC 8 /* Absolute */
#endif
/*
Enumerated declarations.
*/
typedef enum
{
UndefinedSubtype,
RGB555,
RGB565,
ARGB4444,
ARGB1555
} BMPSubtype;
/*
Typedef declarations.
*/
typedef struct _BMPInfo
{
unsigned int
file_size,
ba_offset,
offset_bits,
size;
ssize_t
width,
height;
unsigned short
planes,
bits_per_pixel;
unsigned int
compression,
image_size,
x_pixels,
y_pixels,
number_colors,
red_mask,
green_mask,
blue_mask,
alpha_mask,
colors_important;
long
colorspace;
PrimaryInfo
red_primary,
green_primary,
blue_primary,
gamma_scale;
} BMPInfo;
/*
Forward declarations.
*/
static MagickBooleanType
WriteBMPImage(const ImageInfo *,Image *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DecodeImage unpacks runlength-encoded image pixels into an
% uncompressed pixel array.
%
% The format of the DecodeImage method is:
%
% MagickBooleanType DecodeImage(Image *image,const size_t compression,
% unsigned char *pixels,const size_t number_pixels)
%
% A description of each parameter follows:
%
% o image: the address of a structure of type Image.
%
% o compression: Zero means uncompressed. A value of 1 means the
% compressed pixels are runlength encoded for a 256-color bitmap.
% A value of 2 means a 16-color bitmap. A value of 3 means bitfields
% encoding.
%
% o pixels: The address of a byte (8 bits) array of pixel data created by
% the decoding process.
%
% o number_pixels: The number of pixels.
%
*/
static MagickBooleanType DecodeImage(Image *image,const size_t compression,
unsigned char *pixels,const size_t number_pixels)
{
int
byte,
count;
register ssize_t
i,
x;
register unsigned char
*p,
*q;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (unsigned char *) NULL);
(void) memset(pixels,0,number_pixels*sizeof(*pixels));
byte=0;
x=0;
p=pixels;
q=pixels+number_pixels;
for (y=0; y < (ssize_t) image->rows; )
{
MagickBooleanType
status;
if ((p < pixels) || (p > q))
break;
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count > 0)
{
/*
Encoded mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
byte=ReadBlobByte(image);
if (byte == EOF)
break;
if (compression == BI_RLE8)
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char) byte;
}
else
{
for (i=0; i < (ssize_t) count; i++)
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
}
else
{
/*
Escape mode.
*/
count=ReadBlobByte(image);
if (count == EOF)
break;
if (count == 0x01)
return(MagickTrue);
switch (count)
{
case 0x00:
{
/*
End of line.
*/
x=0;
y++;
p=pixels+y*image->columns;
break;
}
case 0x02:
{
/*
Delta mode.
*/
x+=ReadBlobByte(image);
y+=ReadBlobByte(image);
p=pixels+y*image->columns+x;
break;
}
default:
{
/*
Absolute mode.
*/
count=(int) MagickMin((ssize_t) count,(ssize_t) (q-p));
if (compression == BI_RLE8)
for (i=0; i < (ssize_t) count; i++)
{
byte=ReadBlobByte(image);
if (byte == EOF)
break;
*p++=(unsigned char) byte;
}
else
for (i=0; i < (ssize_t) count; i++)
{
if ((i & 0x01) == 0)
{
byte=ReadBlobByte(image);
if (byte == EOF)
break;
}
*p++=(unsigned char)
((i & 0x01) != 0 ? (byte & 0x0f) : ((byte >> 4) & 0x0f));
}
x+=count;
/*
Read pad byte.
*/
if (compression == BI_RLE8)
{
if ((count & 0x01) != 0)
if (ReadBlobByte(image) == EOF)
break;
}
else
if (((count & 0x03) == 1) || ((count & 0x03) == 2))
if (ReadBlobByte(image) == EOF)
break;
break;
}
}
}
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
(void) ReadBlobByte(image); /* end of line */
(void) ReadBlobByte(image);
return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue);
}
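/*
  Worked example of the BI_RLE8 escape handling above (bytes are
  hypothetical):

    03 FF          encoded mode: three pixels of colormap index 0xFF
    00 03 01 02 03 00
                   absolute mode: three literal indexes plus one pad byte
    00 02 05 01    delta mode: advance 5 columns and 1 row
    00 00          end of line
    00 01          end of bitmap

  The pad byte after an odd-length absolute run keeps each run aligned
  to a 16-bit boundary, which is why the decoder consumes it.
*/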
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E n c o d e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EncodeImage compresses pixels using a runlength encoded format.
%
% The format of the EncodeImage method is:
%
% static MagickBooleanType EncodeImage(Image *image,
% const size_t bytes_per_line,const unsigned char *pixels,
% unsigned char *compressed_pixels)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o bytes_per_line: the number of bytes in a scanline of compressed pixels
%
% o pixels: The address of a byte (8 bits) array of pixel data created by
% the compression process.
%
% o compressed_pixels: The address of a byte (8 bits) array of compressed
% pixel data.
%
*/
static size_t EncodeImage(Image *image,const size_t bytes_per_line,
const unsigned char *pixels,unsigned char *compressed_pixels)
{
MagickBooleanType
status;
register const unsigned char
*p;
register ssize_t
i,
x;
register unsigned char
*q;
ssize_t
y;
/*
Runlength encode pixels.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pixels != (const unsigned char *) NULL);
assert(compressed_pixels != (unsigned char *) NULL);
p=pixels;
q=compressed_pixels;
i=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
for (x=0; x < (ssize_t) bytes_per_line; x+=i)
{
/*
Determine runlength.
*/
for (i=1; ((x+i) < (ssize_t) bytes_per_line); i++)
if ((i == 255) || (*(p+i) != *p))
break;
*q++=(unsigned char) i;
*q++=(*p);
p+=i;
}
/*
End of line.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
/*
End of bitmap.
*/
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x01;
return((size_t) (q-compressed_pixels));
}
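/*
  Worked example of the encoding above (bytes are hypothetical): a
  6-byte scanline of colormap indexes

    AA AA AA AA 07 07

  is emitted as

    04 AA 02 07 00 00

  i.e. one count/value pair per run followed by the 0x00 0x00
  end-of-line marker; after the last scanline the encoder appends
  0x00 0x01, the end-of-bitmap marker.
*/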
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s B M P %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsBMP() returns MagickTrue if the image format type, identified by the
% magick string, is BMP.
%
% The format of the IsBMP method is:
%
% MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsBMP(const unsigned char *magick,const size_t length)
{
if (length < 2)
return(MagickFalse);
if ((LocaleNCompare((char *) magick,"BA",2) == 0) ||
(LocaleNCompare((char *) magick,"BM",2) == 0) ||
(LocaleNCompare((char *) magick,"IC",2) == 0) ||
(LocaleNCompare((char *) magick,"PI",2) == 0) ||
(LocaleNCompare((char *) magick,"CI",2) == 0) ||
(LocaleNCompare((char *) magick,"CP",2) == 0))
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadBMPImage() reads a Microsoft Windows bitmap image file, Version
% 2, 3 (for Windows or NT), or 4, and returns it. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ReadBMPImage method is:
%
% image=ReadBMPImage(image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadBMPImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
BMPInfo
bmp_info;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
offset,
profile_data,
profile_size,
start_position;
MemoryInfo
*pixel_info;
Quantum
index;
register Quantum
*q;
register ssize_t
i,
x;
register unsigned char
*p;
size_t
bit,
bytes_per_line,
length;
ssize_t
count,
y;
unsigned char
magick[12],
*pixels;
unsigned int
blue,
green,
offset_bits,
red;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImage(image_info,exception);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Determine if this a BMP file.
*/
(void) memset(&bmp_info,0,sizeof(bmp_info));
bmp_info.ba_offset=0;
start_position=0;
offset_bits=0;
count=ReadBlob(image,2,magick);
if (count != 2)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
do
{
PixelInfo
quantum_bits;
PixelPacket
shift;
/*
Verify BMP identifier.
*/
start_position=TellBlob(image)-2;
bmp_info.ba_offset=0;
while (LocaleNCompare((char *) magick,"BA",2) == 0)
{
bmp_info.file_size=ReadBlobLSBLong(image);
bmp_info.ba_offset=ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
count=ReadBlob(image,2,magick);
if (count != 2)
break;
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," Magick: %c%c",
magick[0],magick[1]);
if ((count != 2) || ((LocaleNCompare((char *) magick,"BM",2) != 0) &&
(LocaleNCompare((char *) magick,"CI",2) != 0)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
bmp_info.file_size=ReadBlobLSBLong(image);
(void) ReadBlobLSBLong(image);
bmp_info.offset_bits=ReadBlobLSBLong(image);
bmp_info.size=ReadBlobLSBLong(image);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule()," BMP size: %u",
bmp_info.size);
profile_data=0;
profile_size=0;
if (bmp_info.size == 12)
{
/*
OS/2 BMP image file.
*/
(void) CopyMagickString(image->magick,"BMP2",MagickPathExtent);
bmp_info.width=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.height=(ssize_t) ((short) ReadBlobLSBShort(image));
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.x_pixels=0;
bmp_info.y_pixels=0;
bmp_info.number_colors=0;
bmp_info.compression=BI_RGB;
bmp_info.image_size=0;
bmp_info.alpha_mask=0;
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: OS/2 Bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
}
}
else
{
/*
Microsoft Windows BMP image file.
*/
if (bmp_info.size < 40)
ThrowReaderException(CorruptImageError,"NonOS2HeaderSizeError");
bmp_info.width=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.height=(ssize_t) ReadBlobLSBSignedLong(image);
bmp_info.planes=ReadBlobLSBShort(image);
bmp_info.bits_per_pixel=ReadBlobLSBShort(image);
bmp_info.compression=ReadBlobLSBLong(image);
bmp_info.image_size=ReadBlobLSBLong(image);
bmp_info.x_pixels=ReadBlobLSBLong(image);
bmp_info.y_pixels=ReadBlobLSBLong(image);
bmp_info.number_colors=ReadBlobLSBLong(image);
if ((MagickSizeType) bmp_info.number_colors > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
bmp_info.colors_important=ReadBlobLSBLong(image);
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format: MS Windows bitmap");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Geometry: %.20gx%.20g",(double) bmp_info.width,(double)
bmp_info.height);
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Bits per pixel: %.20g",(double) bmp_info.bits_per_pixel);
switch (bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RGB");
break;
}
case BI_RLE4:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE4");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_BITFIELDS");
break;
}
case BI_PNG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_PNG");
break;
}
case BI_JPEG:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: BI_JPEG");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression: UNKNOWN (%u)",bmp_info.compression);
}
}
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number of colors: %u",bmp_info.number_colors);
}
bmp_info.red_mask=ReadBlobLSBLong(image);
bmp_info.green_mask=ReadBlobLSBLong(image);
bmp_info.blue_mask=ReadBlobLSBLong(image);
if (bmp_info.size > 40)
{
double
gamma;
/*
Read color management information.
*/
bmp_info.alpha_mask=ReadBlobLSBLong(image);
bmp_info.colorspace=ReadBlobLSBSignedLong(image);
/*
Decode 2^30 fixed point formatted CIE primaries.
*/
# define BMP_DENOM ((double) 0x40000000)
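/*
Worked example (illustrative, not part of the decoder): a 2.30 fixed-point
dword stores a fraction scaled by 2^30, so a raw value of 0x20000000
decodes to 0x20000000/BMP_DENOM == 0.5, and the nominal sRGB red primary
x of 0.64 would be encoded as (unsigned int) (0.64*0x40000000), i.e.
0x28F5C28F.
*/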
bmp_info.red_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.red_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.green_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.x=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.y=(double) ReadBlobLSBLong(image)/BMP_DENOM;
bmp_info.blue_primary.z=(double) ReadBlobLSBLong(image)/BMP_DENOM;
gamma=bmp_info.red_primary.x+bmp_info.red_primary.y+
bmp_info.red_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.red_primary.x*=gamma;
bmp_info.red_primary.y*=gamma;
image->chromaticity.red_primary.x=bmp_info.red_primary.x;
image->chromaticity.red_primary.y=bmp_info.red_primary.y;
gamma=bmp_info.green_primary.x+bmp_info.green_primary.y+
bmp_info.green_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.green_primary.x*=gamma;
bmp_info.green_primary.y*=gamma;
image->chromaticity.green_primary.x=bmp_info.green_primary.x;
image->chromaticity.green_primary.y=bmp_info.green_primary.y;
gamma=bmp_info.blue_primary.x+bmp_info.blue_primary.y+
bmp_info.blue_primary.z;
gamma=PerceptibleReciprocal(gamma);
bmp_info.blue_primary.x*=gamma;
bmp_info.blue_primary.y*=gamma;
image->chromaticity.blue_primary.x=bmp_info.blue_primary.x;
image->chromaticity.blue_primary.y=bmp_info.blue_primary.y;
/*
Decode 16.16 fixed point formatted gamma scales.
*/
bmp_info.gamma_scale.x=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.y=(double) ReadBlobLSBLong(image)/0x10000;
bmp_info.gamma_scale.z=(double) ReadBlobLSBLong(image)/0x10000;
/*
Compute a single gamma from the BMP 3-channel gamma.
*/
image->gamma=(bmp_info.gamma_scale.x+bmp_info.gamma_scale.y+
bmp_info.gamma_scale.z)/3.0;
}
else
(void) CopyMagickString(image->magick,"BMP3",MagickPathExtent);
if (bmp_info.size > 108)
{
size_t
intent;
/*
Read BMP Version 5 color management information.
*/
intent=ReadBlobLSBLong(image);
switch ((int) intent)
{
case LCS_GM_BUSINESS:
{
image->rendering_intent=SaturationIntent;
break;
}
case LCS_GM_GRAPHICS:
{
image->rendering_intent=RelativeIntent;
break;
}
case LCS_GM_IMAGES:
{
image->rendering_intent=PerceptualIntent;
break;
}
case LCS_GM_ABS_COLORIMETRIC:
{
image->rendering_intent=AbsoluteIntent;
break;
}
}
profile_data=(MagickOffsetType)ReadBlobLSBLong(image);
profile_size=(MagickOffsetType)ReadBlobLSBLong(image);
(void) ReadBlobLSBLong(image); /* Reserved byte */
}
}
if ((MagickSizeType) bmp_info.file_size > GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"LengthAndFilesizeDoNotMatch","`%s'",image->filename);
else
if ((MagickSizeType) bmp_info.file_size < GetBlobSize(image))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"LengthAndFilesizeDoNotMatch","`%s'",
image->filename);
if (bmp_info.width <= 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.height == 0)
ThrowReaderException(CorruptImageError,"NegativeOrZeroImageSize");
if (bmp_info.planes != 1)
ThrowReaderException(CorruptImageError,"StaticPlanesValueNotEqualToOne");
if ((bmp_info.bits_per_pixel != 1) && (bmp_info.bits_per_pixel != 4) &&
(bmp_info.bits_per_pixel != 8) && (bmp_info.bits_per_pixel != 16) &&
(bmp_info.bits_per_pixel != 24) && (bmp_info.bits_per_pixel != 32))
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
if (bmp_info.bits_per_pixel < 16 &&
bmp_info.number_colors > (1U << bmp_info.bits_per_pixel))
ThrowReaderException(CorruptImageError,"UnrecognizedNumberOfColors");
if ((bmp_info.compression == 1) && (bmp_info.bits_per_pixel != 8))
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
if ((bmp_info.compression == 2) && (bmp_info.bits_per_pixel != 4))
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
if ((bmp_info.compression == 3) && (bmp_info.bits_per_pixel < 16))
ThrowReaderException(CorruptImageError,"UnsupportedBitsPerPixel");
switch (bmp_info.compression)
{
case BI_RGB:
image->compression=NoCompression;
break;
case BI_RLE8:
case BI_RLE4:
image->compression=RLECompression;
break;
case BI_BITFIELDS:
break;
case BI_JPEG:
ThrowReaderException(CoderError,"JPEGCompressNotSupported");
case BI_PNG:
ThrowReaderException(CoderError,"PNGCompressNotSupported");
default:
ThrowReaderException(CorruptImageError,"UnrecognizedImageCompression");
}
image->columns=(size_t) MagickAbsoluteValue(bmp_info.width);
image->rows=(size_t) MagickAbsoluteValue(bmp_info.height);
image->depth=bmp_info.bits_per_pixel <= 8 ? bmp_info.bits_per_pixel : 8;
image->alpha_trait=((bmp_info.alpha_mask != 0) &&
(bmp_info.compression == BI_BITFIELDS)) ? BlendPixelTrait :
UndefinedPixelTrait;
if (bmp_info.bits_per_pixel < 16)
{
size_t
one;
image->storage_class=PseudoClass;
image->colors=bmp_info.number_colors;
one=1;
if (image->colors == 0)
image->colors=one << bmp_info.bits_per_pixel;
}
image->resolution.x=(double) bmp_info.x_pixels/100.0;
image->resolution.y=(double) bmp_info.y_pixels/100.0;
image->units=PixelsPerCentimeterResolution;
if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0))
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
status=SetImageExtent(image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImageList(image));
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
size_t
packet_size;
/*
Read BMP raster colormap.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading colormap of %.20g colors",(double) image->colors);
if (AcquireImageColormap(image,image->colors,exception) == MagickFalse)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t)
image->colors,4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
if ((bmp_info.size == 12) || (bmp_info.size == 64))
packet_size=3;
else
packet_size=4;
offset=SeekBlob(image,start_position+14+bmp_info.size,SEEK_SET);
if (offset < 0)
{
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
count=ReadBlob(image,packet_size*image->colors,bmp_colormap);
if (count != (ssize_t) (packet_size*image->colors))
{
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
p=bmp_colormap;
for (i=0; i < (ssize_t) image->colors; i++)
{
image->colormap[i].blue=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].green=(MagickRealType) ScaleCharToQuantum(*p++);
image->colormap[i].red=(MagickRealType) ScaleCharToQuantum(*p++);
if (packet_size == 4)
p++;
}
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
/*
Read image data.
*/
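/*
The offset_bits comparison below guards against crafted multi-image files
whose headers point back at pixel data that was already consumed; a
repeated offset would otherwise loop forever, so it is treated as a
corrupt header.
*/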
if (bmp_info.offset_bits == offset_bits)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
offset_bits=bmp_info.offset_bits;
offset=SeekBlob(image,start_position+bmp_info.offset_bits,SEEK_SET);
if (offset < 0)
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
if (bmp_info.compression == BI_RLE4)
bmp_info.bits_per_pixel<<=1;
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
length=(size_t) bytes_per_line*image->rows;
if ((MagickSizeType) (length/256) > GetBlobSize(image))
ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile");
if ((bmp_info.compression == BI_RGB) ||
(bmp_info.compression == BI_BITFIELDS))
{
pixel_info=AcquireVirtualMemory(image->rows,
MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Reading pixels (%.20g bytes)",(double) length);
count=ReadBlob(image,length,pixels);
if (count != (ssize_t) length)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"InsufficientImageDataInFile");
}
}
else
{
/*
Convert run-length encoded raster pixels.
*/
pixel_info=AcquireVirtualMemory(image->rows,
MagickMax(bytes_per_line,image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
status=DecodeImage(image,bmp_info.compression,pixels,
image->columns*image->rows);
if (status == MagickFalse)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnableToRunlengthDecodeImage");
}
}
/*
Convert BMP raster image to pixel packets.
*/
if (bmp_info.compression == BI_RGB)
{
/*
We should ignore the alpha value in BMP3 files, but there have been
reports of 32-bit files that carry alpha.  Do a quick scan for an
alpha byte that is not zero (the default value); if one is found,
assume the program that wrote the file intended the alpha channel
to be used.
*/
if ((image->alpha_trait == UndefinedPixelTrait) &&
(bmp_info.size == 40) && (bmp_info.bits_per_pixel == 32))
{
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
if (*(p+3) != 0)
{
image->alpha_trait=BlendPixelTrait;
y=-1;
break;
}
p+=4;
}
}
}
bmp_info.alpha_mask=image->alpha_trait != UndefinedPixelTrait ?
0xff000000U : 0U;
bmp_info.red_mask=0x00ff0000U;
bmp_info.green_mask=0x0000ff00U;
bmp_info.blue_mask=0x000000ffU;
if (bmp_info.bits_per_pixel == 16)
{
/*
RGB555.
*/
bmp_info.red_mask=0x00007c00U;
bmp_info.green_mask=0x000003e0U;
bmp_info.blue_mask=0x0000001fU;
}
}
(void) memset(&shift,0,sizeof(shift));
(void) memset(&quantum_bits,0,sizeof(quantum_bits));
if ((bmp_info.bits_per_pixel == 16) || (bmp_info.bits_per_pixel == 32))
{
register unsigned int
sample;
/*
Get shift and quantum bits info from bitfield masks.
*/
if (bmp_info.red_mask != 0)
while (((bmp_info.red_mask << shift.red) & 0x80000000UL) == 0)
{
shift.red++;
if (shift.red >= 32U)
break;
}
if (bmp_info.green_mask != 0)
while (((bmp_info.green_mask << shift.green) & 0x80000000UL) == 0)
{
shift.green++;
if (shift.green >= 32U)
break;
}
if (bmp_info.blue_mask != 0)
while (((bmp_info.blue_mask << shift.blue) & 0x80000000UL) == 0)
{
shift.blue++;
if (shift.blue >= 32U)
break;
}
if (bmp_info.alpha_mask != 0)
while (((bmp_info.alpha_mask << shift.alpha) & 0x80000000UL) == 0)
{
shift.alpha++;
if (shift.alpha >= 32U)
break;
}
sample=shift.red;
while (((bmp_info.red_mask << sample) & 0x80000000UL) != 0)
{
sample++;
if (sample >= 32U)
break;
}
quantum_bits.red=(MagickRealType) (sample-shift.red);
sample=shift.green;
while (((bmp_info.green_mask << sample) & 0x80000000UL) != 0)
{
sample++;
if (sample >= 32U)
break;
}
quantum_bits.green=(MagickRealType) (sample-shift.green);
sample=shift.blue;
while (((bmp_info.blue_mask << sample) & 0x80000000UL) != 0)
{
sample++;
if (sample >= 32U)
break;
}
quantum_bits.blue=(MagickRealType) (sample-shift.blue);
sample=shift.alpha;
while (((bmp_info.alpha_mask << sample) & 0x80000000UL) != 0)
{
sample++;
if (sample >= 32U)
break;
}
quantum_bits.alpha=(MagickRealType) (sample-shift.alpha);
}
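/*
Worked example for the mask scans above (illustrative): the RGB555 red
mask 0x00007c00 must be shifted left 17 bits before its top bit reaches
bit 31, so shift.red == 17, and five consecutive set bits follow, so
quantum_bits.red == 5.
*/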
switch (bmp_info.bits_per_pixel)
{
case 1:
{
/*
Convert bitmap scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-7); x+=8)
{
for (bit=0; bit < 8; bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if ((image->columns % 8) != 0)
{
for (bit=0; bit < (image->columns % 8); bit++)
{
index=(Quantum) (((*p) & (0x80 >> bit)) != 0 ? 0x01 : 0x00);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
p++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 4:
{
/*
Convert PseudoColor scanline.
*/
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < ((ssize_t) image->columns-1); x+=2)
{
ValidateColormapValue(image,(ssize_t) ((*p >> 4) & 0x0f),&index,
exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
ValidateColormapValue(image,(ssize_t) (*p & 0x0f),&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
}
if ((image->columns % 2) != 0)
{
ValidateColormapValue(image,(ssize_t) ((*p >> 4) & 0xf),&index,
exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
p++;
x++;
}
if (x < (ssize_t) image->columns)
break;
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 8:
{
/*
Convert PseudoColor scanline.
*/
if ((bmp_info.compression == BI_RLE8) ||
(bmp_info.compression == BI_RLE4))
bytes_per_line=image->columns;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=(ssize_t) image->columns; x != 0; --x)
{
ValidateColormapValue(image,(ssize_t) *p++,&index,exception);
SetPixelIndex(image,index,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
(void) SyncImage(image,exception);
break;
}
case 16:
{
unsigned int
alpha,
pixel;
/*
Convert bitfield encoded 16-bit DirectColor scanline.
*/
if ((bmp_info.compression != BI_RGB) &&
(bmp_info.compression != BI_BITFIELDS))
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=2*(image->columns+image->columns % 2);
image->storage_class=DirectClass;
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(unsigned int) (*p++);
pixel|=(*p++) << 8;
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 5)
red|=((red & 0xe000) >> 5);
if (quantum_bits.red <= 8)
red|=((red & 0xff00) >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 5)
green|=((green & 0xe000) >> 5);
if (quantum_bits.green == 6)
green|=((green & 0xc000) >> 6);
if (quantum_bits.green <= 8)
green|=((green & 0xff00) >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 5)
blue|=((blue & 0xe000) >> 5);
if (quantum_bits.blue <= 8)
blue|=((blue & 0xff00) >> 8);
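/*
The OR-folds above are standard bit replication: a 5-bit sample
left-justified in 16 bits is expanded so full intensity maps to 0xffff.
For example red == 0xf800 becomes 0xff00 after the first fold and 0xffff
after the second, instead of stopping short at 0xf800.
*/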
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha <= 8)
alpha|=((alpha & 0xff00) >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectColor scanline.
*/
bytes_per_line=4*((image->columns*24+31)/32);
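/*
Row-stride example (illustrative): BMP scanlines are padded to a 4-byte
boundary, so a 3-pixel 24-bit row occupies 4*((3*24+31)/32) == 12 bytes,
i.e. 9 pixel bytes plus 3 bytes of padding.
*/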
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelBlue(image,ScaleCharToQuantum(*p++),q);
SetPixelGreen(image,ScaleCharToQuantum(*p++),q);
SetPixelRed(image,ScaleCharToQuantum(*p++),q);
SetPixelAlpha(image,OpaqueAlpha,q);
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert bitfield encoded DirectColor scanline.
*/
if ((bmp_info.compression != BI_RGB) &&
(bmp_info.compression != BI_BITFIELDS))
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,
"UnrecognizedImageCompression");
}
bytes_per_line=4*(image->columns);
for (y=(ssize_t) image->rows-1; y >= 0; y--)
{
unsigned int
alpha,
pixel;
p=pixels+(image->rows-y-1)*bytes_per_line;
q=QueueAuthenticPixels(image,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
pixel=(unsigned int) (*p++);
pixel|=((unsigned int) *p++ << 8);
pixel|=((unsigned int) *p++ << 16);
pixel|=((unsigned int) *p++ << 24);
red=((pixel & bmp_info.red_mask) << shift.red) >> 16;
if (quantum_bits.red == 8)
red|=(red >> 8);
green=((pixel & bmp_info.green_mask) << shift.green) >> 16;
if (quantum_bits.green == 8)
green|=(green >> 8);
blue=((pixel & bmp_info.blue_mask) << shift.blue) >> 16;
if (quantum_bits.blue == 8)
blue|=(blue >> 8);
SetPixelRed(image,ScaleShortToQuantum((unsigned short) red),q);
SetPixelGreen(image,ScaleShortToQuantum((unsigned short) green),q);
SetPixelBlue(image,ScaleShortToQuantum((unsigned short) blue),q);
SetPixelAlpha(image,OpaqueAlpha,q);
if (image->alpha_trait != UndefinedPixelTrait)
{
alpha=((pixel & bmp_info.alpha_mask) << shift.alpha) >> 16;
if (quantum_bits.alpha == 8)
alpha|=(alpha >> 8);
SetPixelAlpha(image,ScaleShortToQuantum(
(unsigned short) alpha),q);
}
q+=GetPixelChannels(image);
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
break;
offset=(MagickOffsetType) (image->rows-y-1);
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,LoadImageTag,(MagickOffsetType)
(image->rows-y),image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
default:
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (y > 0)
break;
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
break;
}
if (bmp_info.height < 0)
{
Image
*flipped_image;
/*
Correct image orientation.
*/
flipped_image=FlipImage(image,exception);
if (flipped_image != (Image *) NULL)
{
DuplicateBlob(flipped_image,image);
ReplaceImageInList(&image, flipped_image);
image=flipped_image;
}
}
/*
Read embedded ICC profile.
*/
if ((bmp_info.colorspace == 0x4D424544L) && (profile_data > 0) &&
(profile_size > 0))
{
StringInfo
*profile;
unsigned char
*datum;
offset=start_position+14+profile_data;
if ((offset < TellBlob(image)) ||
(SeekBlob(image,offset,SEEK_SET) != offset) ||
(GetBlobSize(image) < (MagickSizeType) (offset+profile_size)))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
profile=AcquireStringInfo((size_t) profile_size);
if (profile == (StringInfo *) NULL)
ThrowReaderException(CorruptImageError,"MemoryAllocationFailed");
datum=GetStringInfoDatum(profile);
if (ReadBlob(image,(size_t) profile_size,datum) == (ssize_t) profile_size)
{
MagickOffsetType
profile_size_orig;
/*
Trim padding: the profile's true length is stored big-endian in its
first four bytes.
*/
profile_size_orig=(MagickOffsetType) datum[0] << 24;
profile_size_orig|=(MagickOffsetType) datum[1] << 16;
profile_size_orig|=(MagickOffsetType) datum[2] << 8;
profile_size_orig|=(MagickOffsetType) datum[3];
if (profile_size_orig < profile_size)
SetStringInfoLength(profile,(size_t) profile_size_orig);
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
"Profile: ICC, %u bytes",(unsigned int) profile_size_orig);
(void) SetImageProfile(image,"icc",profile,exception);
}
profile=DestroyStringInfo(profile);
}
/*
Proceed to next image.
*/
if (image_info->number_scenes != 0)
if (image->scene >= (image_info->scene+image_info->number_scenes-1))
break;
offset=(MagickOffsetType) bmp_info.ba_offset;
if (offset != 0)
if ((offset < TellBlob(image)) ||
(SeekBlob(image,offset,SEEK_SET) != offset))
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
*magick='\0';
count=ReadBlob(image,2,magick);
if ((count == 2) && (IsBMP(magick,2) != MagickFalse))
{
/*
Acquire next image structure.
*/
AcquireNextImage(image_info,image,exception);
if (GetNextImageInList(image) == (Image *) NULL)
{
status=MagickFalse;
break;
}
image=SyncNextImageInList(image);
status=SetImageProgress(image,LoadImagesTag,TellBlob(image),
GetBlobSize(image));
if (status == MagickFalse)
break;
}
} while (IsBMP(magick,2) != MagickFalse);
(void) CloseBlob(image);
if (status == MagickFalse)
return(DestroyImageList(image));
return(GetFirstImageInList(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterBMPImage() adds attributes for the BMP image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterBMPImage method is:
%
% size_t RegisterBMPImage(void)
%
*/
ModuleExport size_t RegisterBMPImage(void)
{
MagickInfo
*entry;
entry=AcquireMagickInfo("BMP","BMP","Microsoft Windows bitmap image");
entry->decoder=(DecodeImageHandler *) ReadBMPImage;
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP2","Microsoft Windows bitmap image (V2)");
entry->decoder=(DecodeImageHandler *) ReadBMPImage;
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
entry=AcquireMagickInfo("BMP","BMP3","Microsoft Windows bitmap image (V3)");
entry->decoder=(DecodeImageHandler *) ReadBMPImage;
entry->encoder=(EncodeImageHandler *) WriteBMPImage;
entry->magick=(IsImageFormatHandler *) IsBMP;
entry->flags^=CoderAdjoinFlag;
entry->flags|=CoderDecoderSeekableStreamFlag;
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterBMPImage() removes format registrations made by the
% BMP module from the list of supported formats.
%
% The format of the UnregisterBMPImage method is:
%
% UnregisterBMPImage(void)
%
*/
ModuleExport void UnregisterBMPImage(void)
{
(void) UnregisterMagickInfo("BMP");
(void) UnregisterMagickInfo("BMP2");
(void) UnregisterMagickInfo("BMP3");
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e B M P I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteBMPImage() writes an image in the Microsoft Windows bitmap encoded
% image format: version 3 for Windows, or version 4 if the image has a
% matte channel.
%
% The format of the WriteBMPImage method is:
%
% MagickBooleanType WriteBMPImage(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WriteBMPImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
BMPInfo
bmp_info;
BMPSubtype
bmp_subtype;
const char
*option;
const StringInfo
*profile;
MagickBooleanType
have_color_info,
status;
MagickOffsetType
scene;
MemoryInfo
*pixel_info;
register const Quantum
*p;
register ssize_t
i,
x;
register unsigned char
*q;
size_t
bytes_per_line,
imageListLength,
type;
ssize_t
y;
unsigned char
*bmp_data,
*pixels;
MagickOffsetType
profile_data,
profile_size,
profile_size_pad;
/*
Open output image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
if (status == MagickFalse)
return(status);
if (((image->columns << 3) != (int) (image->columns << 3)) ||
((image->rows << 3) != (int) (image->rows << 3)))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
type=4;
if (LocaleCompare(image_info->magick,"BMP2") == 0)
type=2;
else
if (LocaleCompare(image_info->magick,"BMP3") == 0)
type=3;
option=GetImageOption(image_info,"bmp:format");
if (option != (char *) NULL)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Format=%s",option);
if (LocaleCompare(option,"bmp2") == 0)
type=2;
if (LocaleCompare(option,"bmp3") == 0)
type=3;
if (LocaleCompare(option,"bmp4") == 0)
type=4;
}
scene=0;
imageListLength=GetImageListLength(image);
do
{
/*
Initialize BMP raster file header.
*/
if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse)
(void) TransformImageColorspace(image,sRGBColorspace,exception);
(void) memset(&bmp_info,0,sizeof(bmp_info));
bmp_info.file_size=14+12;
if (type > 2)
bmp_info.file_size+=28;
bmp_info.offset_bits=bmp_info.file_size;
bmp_info.compression=BI_RGB;
bmp_info.red_mask=0x00ff0000U;
bmp_info.green_mask=0x0000ff00U;
bmp_info.blue_mask=0x000000ffU;
bmp_info.alpha_mask=0xff000000U;
bmp_subtype=UndefinedSubtype;
if ((image->storage_class == PseudoClass) && (image->colors > 256))
(void) SetImageStorageClass(image,DirectClass,exception);
if (image->storage_class != DirectClass)
{
/*
Colormapped BMP raster.
*/
bmp_info.bits_per_pixel=8;
if (image->colors <= 2)
bmp_info.bits_per_pixel=1;
else
if (image->colors <= 16)
bmp_info.bits_per_pixel=4;
else
if (image->colors <= 256)
bmp_info.bits_per_pixel=8;
if (image_info->compression == RLECompression)
bmp_info.bits_per_pixel=8;
bmp_info.number_colors=1U << bmp_info.bits_per_pixel;
if (image->alpha_trait != UndefinedPixelTrait)
(void) SetImageStorageClass(image,DirectClass,exception);
else
if ((size_t) bmp_info.number_colors < image->colors)
(void) SetImageStorageClass(image,DirectClass,exception);
else
{
bmp_info.file_size+=3*(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=3*(1UL << bmp_info.bits_per_pixel);
if (type > 2)
{
bmp_info.file_size+=(1UL << bmp_info.bits_per_pixel);
bmp_info.offset_bits+=(1UL << bmp_info.bits_per_pixel);
}
}
}
if (image->storage_class == DirectClass)
{
/*
Full color BMP raster.
*/
bmp_info.number_colors=0;
option=GetImageOption(image_info,"bmp:subtype");
if (option != (const char *) NULL)
{
if (image->alpha_trait != UndefinedPixelTrait)
{
if (LocaleNCompare(option,"ARGB4444",8) == 0)
{
bmp_subtype=ARGB4444;
bmp_info.red_mask=0x00000f00U;
bmp_info.green_mask=0x000000f0U;
bmp_info.blue_mask=0x0000000fU;
bmp_info.alpha_mask=0x0000f000U;
}
else if (LocaleNCompare(option,"ARGB1555",8) == 0)
{
bmp_subtype=ARGB1555;
bmp_info.red_mask=0x00007c00U;
bmp_info.green_mask=0x000003e0U;
bmp_info.blue_mask=0x0000001fU;
bmp_info.alpha_mask=0x00008000U;
}
}
else
{
if (LocaleNCompare(option,"RGB555",6) == 0)
{
bmp_subtype=RGB555;
bmp_info.red_mask=0x00007c00U;
bmp_info.green_mask=0x000003e0U;
bmp_info.blue_mask=0x0000001fU;
bmp_info.alpha_mask=0U;
}
else if (LocaleNCompare(option,"RGB565",6) == 0)
{
bmp_subtype=RGB565;
bmp_info.red_mask=0x0000f800U;
bmp_info.green_mask=0x000007e0U;
bmp_info.blue_mask=0x0000001fU;
bmp_info.alpha_mask=0U;
}
}
}
if (bmp_subtype != UndefinedSubtype)
{
bmp_info.bits_per_pixel=16;
bmp_info.compression=BI_BITFIELDS;
}
else
{
bmp_info.bits_per_pixel=(unsigned short) ((type > 3) &&
(image->alpha_trait != UndefinedPixelTrait) ? 32 : 24);
bmp_info.compression=(unsigned int) ((type > 3) &&
(image->alpha_trait != UndefinedPixelTrait) ? BI_BITFIELDS : BI_RGB);
if ((type == 3) && (image->alpha_trait != UndefinedPixelTrait))
{
option=GetImageOption(image_info,"bmp3:alpha");
if (IsStringTrue(option))
bmp_info.bits_per_pixel=32;
}
}
}
bytes_per_line=4*((image->columns*bmp_info.bits_per_pixel+31)/32);
bmp_info.ba_offset=0;
profile=GetImageProfile(image,"icc");
have_color_info=(image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL) || (image->gamma != 0.0) ? MagickTrue :
MagickFalse;
if (type == 2)
bmp_info.size=12;
else
if ((type == 3) || ((image->alpha_trait == UndefinedPixelTrait) &&
(have_color_info == MagickFalse)))
{
type=3;
bmp_info.size=40;
}
else
{
int
extra_size;
bmp_info.size=108;
extra_size=68;
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
bmp_info.size=124;
extra_size+=16;
}
bmp_info.file_size+=extra_size;
bmp_info.offset_bits+=extra_size;
}
if (((ssize_t) image->columns != (ssize_t) ((signed int) image->columns)) ||
((ssize_t) image->rows != (ssize_t) ((signed int) image->rows)))
ThrowWriterException(ImageError,"WidthOrHeightExceedsLimit");
bmp_info.width=(ssize_t) image->columns;
bmp_info.height=(ssize_t) image->rows;
bmp_info.planes=1;
bmp_info.image_size=(unsigned int) (bytes_per_line*image->rows);
bmp_info.file_size+=bmp_info.image_size;
bmp_info.x_pixels=75*39;
bmp_info.y_pixels=75*39;
switch (image->units)
{
case UndefinedResolution:
case PixelsPerInchResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x/2.54);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y/2.54);
break;
}
case PixelsPerCentimeterResolution:
{
bmp_info.x_pixels=(unsigned int) (100.0*image->resolution.x);
bmp_info.y_pixels=(unsigned int) (100.0*image->resolution.y);
break;
}
}
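/*
Unit note: BMP stores resolution in pixels per metre.  One inch is
2.54 cm, hence the 100.0/2.54 conversion from pixels per inch; the
75*39 default above is the traditional truncated approximation of
75 DPI (75*39.37 ~= 2952 ppm).
*/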
bmp_info.colors_important=bmp_info.number_colors;
/*
Convert MIFF to BMP raster pixels.
*/
pixel_info=AcquireVirtualMemory(image->rows,MagickMax(bytes_per_line,
image->columns+256UL)*sizeof(*pixels));
if (pixel_info == (MemoryInfo *) NULL)
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
(void) memset(pixels,0,(size_t) bmp_info.image_size);
switch (bmp_info.bits_per_pixel)
{
case 1:
{
size_t
bit,
byte;
/*
Convert PseudoClass image to a BMP monochrome image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
ssize_t
offset;
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
bit=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=1;
byte|=GetPixelIndex(image,p) != 0 ? 0x01 : 0x00;
bit++;
if (bit == 8)
{
*q++=(unsigned char) byte;
bit=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (bit != 0)
{
*q++=(unsigned char) (byte << (8-bit));
x++;
}
offset=(ssize_t) (image->columns+7)/8;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
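/*
Packing note for the 1-bit case above: a 10-column row emits one full
byte for pixels 0-7, then the remaining two bits are left-justified with
byte << (8-bit), so pixels 8 and 9 occupy the two most-significant bits
of the second byte before the row is zero padded to bytes_per_line.
*/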
case 4:
{
unsigned int
byte,
nibble;
ssize_t
offset;
/*
Convert PseudoClass image to a 4-bit BMP image.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
nibble=0;
byte=0;
for (x=0; x < (ssize_t) image->columns; x++)
{
byte<<=4;
byte|=((unsigned int) GetPixelIndex(image,p) & 0x0f);
nibble++;
if (nibble == 2)
{
*q++=(unsigned char) byte;
nibble=0;
byte=0;
}
p+=GetPixelChannels(image);
}
if (nibble != 0)
{
*q++=(unsigned char) (byte << 4);
x++;
}
offset=(ssize_t) (image->columns+1)/2;
for (x=offset; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 8:
{
/*
Convert PseudoClass packet to BMP pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=(unsigned char) GetPixelIndex(image,p);
p+=GetPixelChannels(image);
}
for ( ; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 16:
{
/*
Convert DirectClass packet to a 16-bit BMP pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
unsigned short
pixel;
pixel=0;
if (bmp_subtype == ARGB4444)
{
pixel=(unsigned short) (ScaleQuantumToAny(
GetPixelAlpha(image,p),15) << 12);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelRed(image,p),15) << 8);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelGreen(image,p),15) << 4);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelBlue(image,p),15));
}
else if (bmp_subtype == RGB565)
{
pixel=(unsigned short) (ScaleQuantumToAny(
GetPixelRed(image,p),31) << 11);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelGreen(image,p),63) << 5);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelBlue(image,p),31));
}
else
{
if (bmp_subtype == ARGB1555)
pixel=(unsigned short) (ScaleQuantumToAny(
GetPixelAlpha(image,p),1) << 15);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelRed(image,p),31) << 10);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelGreen(image,p),31) << 5);
pixel|=(unsigned short) (ScaleQuantumToAny(
GetPixelBlue(image,p),31));
}
*((unsigned short *) q)=pixel;
q+=2;
p+=GetPixelChannels(image);
}
for (x=2L*(ssize_t) image->columns; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 24:
{
/*
Convert DirectClass packet to BMP BGR888.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
p+=GetPixelChannels(image);
}
for (x=3L*(ssize_t) image->columns; x < (ssize_t) bytes_per_line; x++)
*q++=0x00;
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
case 32:
{
/*
Convert DirectClass packet to ARGB8888 pixel.
*/
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
q=pixels+(image->rows-y-1)*bytes_per_line;
for (x=0; x < (ssize_t) image->columns; x++)
{
*q++=ScaleQuantumToChar(GetPixelBlue(image,p));
*q++=ScaleQuantumToChar(GetPixelGreen(image,p));
*q++=ScaleQuantumToChar(GetPixelRed(image,p));
*q++=ScaleQuantumToChar(GetPixelAlpha(image,p));
p+=GetPixelChannels(image);
}
if (image->previous == (Image *) NULL)
{
status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
image->rows);
if (status == MagickFalse)
break;
}
}
break;
}
}
if ((type > 2) && (bmp_info.bits_per_pixel == 8))
if (image_info->compression != NoCompression)
{
MemoryInfo
*rle_info;
/*
Convert run-length encoded raster pixels.
*/
rle_info=AcquireVirtualMemory((size_t) (2*(bytes_per_line+2)+2),
(image->rows+2)*sizeof(*pixels));
if (rle_info == (MemoryInfo *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
bmp_data=(unsigned char *) GetVirtualMemoryBlob(rle_info);
bmp_info.file_size-=bmp_info.image_size;
bmp_info.image_size=(unsigned int) EncodeImage(image,bytes_per_line,
pixels,bmp_data);
bmp_info.file_size+=bmp_info.image_size;
pixel_info=RelinquishVirtualMemory(pixel_info);
pixel_info=rle_info;
pixels=bmp_data;
bmp_info.compression=BI_RLE8;
}
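/*
For reference, BMP RLE8 is a stream of <count,index> pairs plus escape
codes: a scanline of four pixels with colormap indexes 5,5,5,9 encodes
as 0x03 0x05 0x01 0x09 followed by 0x00 0x00 (end of line), and the
bitmap terminates with 0x00 0x01.  EncodeImage() is assumed to emit
this stream from the packed 8-bit raster built above.
*/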
/*
Write BMP for Windows, all versions, 14-byte header.
*/
if (image->debug != MagickFalse)
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Writing BMP version %.20g datastream",(double) type);
if (image->storage_class == DirectClass)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=DirectClass");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Storage class=PseudoClass");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Image depth=%.20g",(double) image->depth);
if (image->alpha_trait != UndefinedPixelTrait)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=True");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Matte=MagickFalse");
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" BMP bits_per_pixel=%.20g",(double) bmp_info.bits_per_pixel);
switch ((int) bmp_info.compression)
{
case BI_RGB:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RGB");
break;
}
case BI_RLE8:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_RLE8");
break;
}
case BI_BITFIELDS:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=BI_BITFIELDS");
break;
}
default:
{
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Compression=UNKNOWN (%u)",bmp_info.compression);
break;
}
}
if (bmp_info.number_colors == 0)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=unspecified");
else
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Number_colors=%u",bmp_info.number_colors);
}
profile_data=0;
profile_size=0;
profile_size_pad=0;
if (profile != (StringInfo *) NULL)
{
profile_data=(MagickOffsetType) bmp_info.file_size-14;  /* offset from the start of the BMP info header */
profile_size=(MagickOffsetType) GetStringInfoLength(profile);
if ((profile_size % 4) > 0)
profile_size_pad=4-(profile_size%4);
bmp_info.file_size+=profile_size+profile_size_pad;
}
(void) WriteBlob(image,2,(unsigned char *) "BM");
(void) WriteBlobLSBLong(image,bmp_info.file_size);
(void) WriteBlobLSBLong(image,bmp_info.ba_offset); /* always 0 */
(void) WriteBlobLSBLong(image,bmp_info.offset_bits);
if (type == 2)
{
/*
Write 12-byte version 2 bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.width);
(void) WriteBlobLSBSignedShort(image,(signed short) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
}
else
{
/*
Write 40-byte version 3+ bitmap header.
*/
(void) WriteBlobLSBLong(image,bmp_info.size);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.width);
(void) WriteBlobLSBSignedLong(image,(signed int) bmp_info.height);
(void) WriteBlobLSBShort(image,bmp_info.planes);
(void) WriteBlobLSBShort(image,bmp_info.bits_per_pixel);
(void) WriteBlobLSBLong(image,bmp_info.compression);
(void) WriteBlobLSBLong(image,bmp_info.image_size);
(void) WriteBlobLSBLong(image,bmp_info.x_pixels);
(void) WriteBlobLSBLong(image,bmp_info.y_pixels);
(void) WriteBlobLSBLong(image,bmp_info.number_colors);
(void) WriteBlobLSBLong(image,bmp_info.colors_important);
}
if ((type > 3) && ((image->alpha_trait != UndefinedPixelTrait) ||
(have_color_info != MagickFalse)))
{
/*
Write the rest of the 108-byte BMP Version 4 header.
*/
(void) WriteBlobLSBLong(image,bmp_info.red_mask);
(void) WriteBlobLSBLong(image,bmp_info.green_mask);
(void) WriteBlobLSBLong(image,bmp_info.blue_mask);
(void) WriteBlobLSBLong(image,bmp_info.alpha_mask);
if (profile != (StringInfo *) NULL)
(void) WriteBlobLSBLong(image,0x4D424544U); /* PROFILE_EMBEDDED */
else
(void) WriteBlobLSBLong(image,0x73524742U); /* sRGB */
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.red_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.red_primary.x+
image->chromaticity.red_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.green_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.green_primary.x+
image->chromaticity.green_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.x*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(image->chromaticity.blue_primary.y*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
((1.000f-(image->chromaticity.blue_primary.x+
image->chromaticity.blue_primary.y))*0x40000000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.x*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.y*0x10000));
(void) WriteBlobLSBLong(image,(unsigned int)
(bmp_info.gamma_scale.z*0x10000));
if ((image->rendering_intent != UndefinedIntent) ||
(profile != (StringInfo *) NULL))
{
ssize_t
intent;
switch ((int) image->rendering_intent)
{
case SaturationIntent:
{
intent=LCS_GM_BUSINESS;
break;
}
case RelativeIntent:
{
intent=LCS_GM_GRAPHICS;
break;
}
case PerceptualIntent:
{
intent=LCS_GM_IMAGES;
break;
}
case AbsoluteIntent:
{
intent=LCS_GM_ABS_COLORIMETRIC;
break;
}
default:
{
intent=0;
break;
}
}
(void) WriteBlobLSBLong(image,(unsigned int) intent);
(void) WriteBlobLSBLong(image,(unsigned int) profile_data);
(void) WriteBlobLSBLong(image,(unsigned int)
(profile_size+profile_size_pad));
(void) WriteBlobLSBLong(image,0x00); /* reserved */
}
}
if (image->storage_class == PseudoClass)
{
unsigned char
*bmp_colormap;
/*
Dump colormap to file.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Colormap: %.20g entries",(double) image->colors);
bmp_colormap=(unsigned char *) AcquireQuantumMemory((size_t) (1UL <<
bmp_info.bits_per_pixel),4*sizeof(*bmp_colormap));
if (bmp_colormap == (unsigned char *) NULL)
{
pixel_info=RelinquishVirtualMemory(pixel_info);
ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
}
q=bmp_colormap;
for (i=0; i < (ssize_t) MagickMin((ssize_t) image->colors,(ssize_t)
bmp_info.number_colors); i++)
{
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].blue));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].green));
*q++=ScaleQuantumToChar(ClampToQuantum(image->colormap[i].red));
if (type > 2)
*q++=(unsigned char) 0x0;
}
for ( ; i < (ssize_t) (1UL << bmp_info.bits_per_pixel); i++)
{
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
*q++=(unsigned char) 0x00;
if (type > 2)
*q++=(unsigned char) 0x00;
}
if (type <= 2)
(void) WriteBlob(image,(size_t) (3*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
else
(void) WriteBlob(image,(size_t) (4*(1L << bmp_info.bits_per_pixel)),
bmp_colormap);
bmp_colormap=(unsigned char *) RelinquishMagickMemory(bmp_colormap);
}
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Pixels: %u bytes",bmp_info.image_size);
(void) WriteBlob(image,(size_t) bmp_info.image_size,pixels);
if (profile != (StringInfo *) NULL)
{
if (image->debug != MagickFalse)
(void) LogMagickEvent(CoderEvent,GetMagickModule(),
" Profile: %g bytes",(double) profile_size+profile_size_pad);
(void) WriteBlob(image,(size_t) profile_size,GetStringInfoDatum(profile));
if (profile_size_pad > 0) /* padding for 4 bytes multiple */
(void) WriteBlob(image,(size_t) profile_size_pad,"\0\0\0");
}
pixel_info=RelinquishVirtualMemory(pixel_info);
if (GetNextImageInList(image) == (Image *) NULL)
break;
image=SyncNextImageInList(image);
status=SetImageProgress(image,SaveImagesTag,scene++,imageListLength);
if (status == MagickFalse)
break;
} while (image_info->adjoin != MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_905_0 |
crossvul-cpp_data_bad_3434_0 | /*
* threads.c: Thread support internal calls
*
* Author:
* Dick Porter (dick@ximian.com)
* Paolo Molaro (lupus@ximian.com)
* Patrik Torstensson (patrik.torstensson@labs2.com)
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*/
#include <config.h>
#include <glib.h>
#include <signal.h>
#include <string.h>
#if defined(__OpenBSD__)
#include <pthread.h>
#include <pthread_np.h>
#endif
#include <mono/metadata/object.h>
#include <mono/metadata/domain-internals.h>
#include <mono/metadata/profiler-private.h>
#include <mono/metadata/threads.h>
#include <mono/metadata/threadpool.h>
#include <mono/metadata/threads-types.h>
#include <mono/metadata/exception.h>
#include <mono/metadata/environment.h>
#include <mono/metadata/monitor.h>
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/marshal.h>
#include <mono/io-layer/io-layer.h>
#ifndef HOST_WIN32
#include <mono/io-layer/threads.h>
#endif
#include <mono/metadata/object-internals.h>
#include <mono/metadata/mono-debug-debugger.h>
#include <mono/utils/mono-compiler.h>
#include <mono/utils/mono-mmap.h>
#include <mono/utils/mono-membar.h>
#include <mono/utils/mono-time.h>
#include <mono/utils/hazard-pointer.h>
#include <mono/metadata/gc-internal.h>
#ifdef PLATFORM_ANDROID
#include <errno.h>
extern int tkill (pid_t tid, int signal);
#endif
/*#define THREAD_DEBUG(a) do { a; } while (0)*/
#define THREAD_DEBUG(a)
/*#define THREAD_WAIT_DEBUG(a) do { a; } while (0)*/
#define THREAD_WAIT_DEBUG(a)
/*#define LIBGC_DEBUG(a) do { a; } while (0)*/
#define LIBGC_DEBUG(a)
#define SPIN_TRYLOCK(i) (InterlockedCompareExchange (&(i), 1, 0) == 0)
#define SPIN_LOCK(i) do { \
if (SPIN_TRYLOCK (i)) \
break; \
} while (1)
#define SPIN_UNLOCK(i) i = 0
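/*
 * Illustrative use of the spin-lock macros above (hypothetical variable):
 *
 *   static gint32 my_lock;
 *   SPIN_LOCK (my_lock);    (acquire: busy-wait on InterlockedCompareExchange)
 *   ... critical section ...
 *   SPIN_UNLOCK (my_lock);  (release: plain store of 0)
 */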
/* Provide this for systems with glib < 2.6 */
#ifndef G_GSIZE_FORMAT
# if GLIB_SIZEOF_LONG == 8
# define G_GSIZE_FORMAT "lu"
# else
# define G_GSIZE_FORMAT "u"
# endif
#endif
struct StartInfo
{
guint32 (*func)(void *);
MonoThread *obj;
MonoObject *delegate;
void *start_arg;
};
typedef union {
gint32 ival;
gfloat fval;
} IntFloatUnion;
typedef union {
gint64 ival;
gdouble fval;
} LongDoubleUnion;
typedef struct _MonoThreadDomainTls MonoThreadDomainTls;
struct _MonoThreadDomainTls {
MonoThreadDomainTls *next;
guint32 offset;
guint32 size;
};
typedef struct {
int idx;
int offset;
MonoThreadDomainTls *freelist;
} StaticDataInfo;
/* Number of cached culture objects in the MonoThread->cached_culture_info array
* (per-type): we use the first NUM entries for CultureInfo and the last for
* UICultureInfo. So the size of the array is really NUM_CACHED_CULTURES * 2.
*/
#define NUM_CACHED_CULTURES 4
#define CULTURES_START_IDX 0
#define UICULTURES_START_IDX NUM_CACHED_CULTURES
/* Controls access to the 'threads' hash table */
#define mono_threads_lock() EnterCriticalSection (&threads_mutex)
#define mono_threads_unlock() LeaveCriticalSection (&threads_mutex)
static CRITICAL_SECTION threads_mutex;
/* Controls access to context static data */
#define mono_contexts_lock() EnterCriticalSection (&contexts_mutex)
#define mono_contexts_unlock() LeaveCriticalSection (&contexts_mutex)
static CRITICAL_SECTION contexts_mutex;
/* Holds current status of static data heap */
static StaticDataInfo thread_static_info;
static StaticDataInfo context_static_info;
/* The hash of existing threads (key is thread ID, value is
* MonoInternalThread*) that need joining before exit
*/
static MonoGHashTable *threads=NULL;
/*
* Threads which are starting up and are not yet in the 'threads' hash.
* When handle_store is called for a thread, it will be removed from this hash table.
* Protected by mono_threads_lock ().
*/
static MonoGHashTable *threads_starting_up = NULL;
/* Maps a MonoThread to its start argument */
/* Protected by mono_threads_lock () */
static MonoGHashTable *thread_start_args = NULL;
/* The TLS key that holds the MonoObject assigned to each thread */
static guint32 current_object_key = -1;
#ifdef MONO_HAVE_FAST_TLS
/* We need to use both the Tls* functions and __thread because
* the GC needs to see all the threads.
*/
MONO_FAST_TLS_DECLARE(tls_current_object);
#define SET_CURRENT_OBJECT(x) do { \
MONO_FAST_TLS_SET (tls_current_object, x); \
TlsSetValue (current_object_key, x); \
} while (FALSE)
#define GET_CURRENT_OBJECT() ((MonoInternalThread*) MONO_FAST_TLS_GET (tls_current_object))
#else
#define SET_CURRENT_OBJECT(x) TlsSetValue (current_object_key, x)
#define GET_CURRENT_OBJECT() (MonoInternalThread*) TlsGetValue (current_object_key)
#endif
/* function called at thread start */
static MonoThreadStartCB mono_thread_start_cb = NULL;
/* function called at thread attach */
static MonoThreadAttachCB mono_thread_attach_cb = NULL;
/* function called at thread cleanup */
static MonoThreadCleanupFunc mono_thread_cleanup_fn = NULL;
/* function called to notify the runtime about a pending exception on the current thread */
static MonoThreadNotifyPendingExcFunc mono_thread_notify_pending_exc_fn = NULL;
/* The default stack size for each thread */
static guint32 default_stacksize = 0;
#define default_stacksize_for_thread(thread) ((thread)->stack_size? (thread)->stack_size: default_stacksize)
static void thread_adjust_static_data (MonoInternalThread *thread);
static void mono_free_static_data (gpointer* static_data, gboolean threadlocal);
static void mono_init_static_data_info (StaticDataInfo *static_data);
static guint32 mono_alloc_static_data_slot (StaticDataInfo *static_data, guint32 size, guint32 align);
static gboolean mono_thread_resume (MonoInternalThread* thread);
static void mono_thread_start (MonoThread *thread);
static void signal_thread_state_change (MonoInternalThread *thread);
static MonoException* mono_thread_execute_interruption (MonoInternalThread *thread);
static void ref_stack_destroy (gpointer rs);
/* Spin lock for InterlockedXXX 64 bit functions */
#define mono_interlocked_lock() EnterCriticalSection (&interlocked_mutex)
#define mono_interlocked_unlock() LeaveCriticalSection (&interlocked_mutex)
static CRITICAL_SECTION interlocked_mutex;
/* global count of thread interruptions requested */
static gint32 thread_interruption_requested = 0;
/* Event signaled when a thread changes its background mode */
static HANDLE background_change_event;
static gboolean shutting_down = FALSE;
guint32
mono_thread_get_tls_key (void)
{
return current_object_key;
}
gint32
mono_thread_get_tls_offset (void)
{
int offset;
MONO_THREAD_VAR_OFFSET (tls_current_object,offset);
return offset;
}
/* handle_store() and handle_remove() manage the array of threads that
* still need to be waited for when the main thread exits.
*
* If handle_store() returns FALSE the thread must not be started
* because Mono is shutting down.
*/
static gboolean handle_store(MonoThread *thread)
{
mono_threads_lock ();
THREAD_DEBUG (g_message ("%s: thread %p ID %"G_GSIZE_FORMAT, __func__, thread, (gsize)thread->internal_thread->tid));
if (threads_starting_up)
mono_g_hash_table_remove (threads_starting_up, thread);
if (shutting_down) {
mono_threads_unlock ();
return FALSE;
}
if(threads==NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads);
threads=mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
}
/* We don't need to duplicate thread->handle, because it is
* only closed when the thread object is finalized by the GC.
*/
g_assert (thread->internal_thread);
mono_g_hash_table_insert(threads, (gpointer)(gsize)(thread->internal_thread->tid),
thread->internal_thread);
mono_threads_unlock ();
return TRUE;
}
static gboolean handle_remove(MonoInternalThread *thread)
{
gboolean ret;
gsize tid = thread->tid;
THREAD_DEBUG (g_message ("%s: thread ID %"G_GSIZE_FORMAT, __func__, tid));
mono_threads_lock ();
if (threads) {
/* We have to check whether the thread object for the
* tid is still the same in the table because the
* thread might have been destroyed and the tid reused
* in the meantime, in which case the tid would be in
* the table, but with another thread object.
*/
if (mono_g_hash_table_lookup (threads, (gpointer)tid) == thread) {
mono_g_hash_table_remove (threads, (gpointer)tid);
ret = TRUE;
} else {
ret = FALSE;
}
}
else
ret = FALSE;
mono_threads_unlock ();
/* Don't close the handle here, wait for the object finalizer
* to do it. Otherwise, the following race condition applies:
*
* 1) Thread exits (and handle_remove() closes the handle)
*
* 2) Some other handle is reassigned the same slot
*
* 3) Another thread tries to join the first thread, and
* blocks waiting for the reassigned handle to be signalled
* (which might never happen). This is possible, because the
* thread calling Join() still has a reference to the first
* thread's object.
*/
return ret;
}
static void ensure_synch_cs_set (MonoInternalThread *thread)
{
CRITICAL_SECTION *synch_cs;
if (thread->synch_cs != NULL) {
return;
}
synch_cs = g_new0 (CRITICAL_SECTION, 1);
InitializeCriticalSection (synch_cs);
if (InterlockedCompareExchangePointer ((gpointer *)&thread->synch_cs,
synch_cs, NULL) != NULL) {
/* Another thread must have installed this CS */
DeleteCriticalSection (synch_cs);
g_free (synch_cs);
}
}
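/*
 * The install-or-discard idiom above is the usual lock-free lazy init:
 * every racing thread allocates, exactly one CAS from NULL succeeds and
 * publishes its critical section, and the losers delete their copy.  A
 * generic sketch of the same pattern (hypothetical names):
 *
 *   Foo *tmp = foo_new ();
 *   if (InterlockedCompareExchangePointer ((gpointer *)&shared, tmp, NULL) != NULL)
 *       foo_free (tmp);  (another thread won the race)
 */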
/*
* NOTE: this function can also be called for threads other than the current
* one: make sure no code called from it ever assumes it runs on the thread
* that is being cleaned up.
*/
static void thread_cleanup (MonoInternalThread *thread)
{
g_assert (thread != NULL);
if (thread->abort_state_handle) {
mono_gchandle_free (thread->abort_state_handle);
thread->abort_state_handle = 0;
}
thread->abort_exc = NULL;
thread->current_appcontext = NULL;
/*
* This is necessary because otherwise we might have
* cross-domain references which will not get cleaned up when
* the target domain is unloaded.
*/
if (thread->cached_culture_info) {
int i;
for (i = 0; i < NUM_CACHED_CULTURES * 2; ++i)
mono_array_set (thread->cached_culture_info, MonoObject*, i, NULL);
}
/* if the thread is not in the hash it has been removed already */
if (!handle_remove (thread)) {
/* This needs to be called even if handle_remove () fails */
if (mono_thread_cleanup_fn)
mono_thread_cleanup_fn (thread);
return;
}
mono_release_type_locks (thread);
EnterCriticalSection (thread->synch_cs);
thread->state |= ThreadState_Stopped;
thread->state &= ~ThreadState_Background;
LeaveCriticalSection (thread->synch_cs);
mono_profiler_thread_end (thread->tid);
if (thread == mono_thread_internal_current ())
mono_thread_pop_appdomain_ref ();
thread->cached_culture_info = NULL;
mono_free_static_data (thread->static_data, TRUE);
thread->static_data = NULL;
ref_stack_destroy (thread->appdomain_refs);
thread->appdomain_refs = NULL;
if (mono_thread_cleanup_fn)
mono_thread_cleanup_fn (thread);
mono_thread_small_id_free (thread->small_id);
MONO_GC_UNREGISTER_ROOT (thread->thread_pinning_ref);
thread->small_id = -2;
}
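/* The special static field offset below is an encoded value: bit 31
* must be clear here, bits 24-30 hold the 1-based index of the per-thread
* static data chunk, and the low 24 bits hold the byte offset within that
* chunk. As a worked example, offset 0x01000010 decodes to chunk index 0
* and byte offset 0x10.
*/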
static gpointer
get_thread_static_data (MonoInternalThread *thread, guint32 offset)
{
int idx;
g_assert ((offset & 0x80000000) == 0);
offset &= 0x7fffffff;
idx = (offset >> 24) - 1;
return ((char*) thread->static_data [idx]) + (offset & 0xffffff);
}
static MonoThread**
get_current_thread_ptr_for_domain (MonoDomain *domain, MonoInternalThread *thread)
{
static MonoClassField *current_thread_field = NULL;
guint32 offset;
if (!current_thread_field) {
current_thread_field = mono_class_get_field_from_name (mono_defaults.thread_class, "current_thread");
g_assert (current_thread_field);
}
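/* Make sure the vtable is created, so the special static field has been
* registered, before looking up its offset below. */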
mono_class_vtable (domain, mono_defaults.thread_class);
mono_domain_lock (domain);
offset = GPOINTER_TO_UINT (g_hash_table_lookup (domain->special_static_fields, current_thread_field));
mono_domain_unlock (domain);
g_assert (offset);
return get_thread_static_data (thread, offset);
}
static void
set_current_thread_for_domain (MonoDomain *domain, MonoInternalThread *thread, MonoThread *current)
{
MonoThread **current_thread_ptr = get_current_thread_ptr_for_domain (domain, thread);
g_assert (current->obj.vtable->domain == domain);
g_assert (!*current_thread_ptr);
*current_thread_ptr = current;
}
static MonoInternalThread*
create_internal_thread_object (void)
{
MonoVTable *vt = mono_class_vtable (mono_get_root_domain (), mono_defaults.internal_thread_class);
return (MonoInternalThread*)mono_gc_alloc_mature (vt);
}
static MonoThread*
create_thread_object (MonoDomain *domain)
{
MonoVTable *vt = mono_class_vtable (domain, mono_defaults.thread_class);
return (MonoThread*)mono_gc_alloc_mature (vt);
}
static MonoThread*
new_thread_with_internal (MonoDomain *domain, MonoInternalThread *internal)
{
MonoThread *thread = create_thread_object (domain);
MONO_OBJECT_SETREF (thread, internal_thread, internal);
return thread;
}
static void
init_root_domain_thread (MonoInternalThread *thread, MonoThread *candidate)
{
MonoDomain *domain = mono_get_root_domain ();
if (!candidate || candidate->obj.vtable->domain != domain)
candidate = new_thread_with_internal (domain, thread);
set_current_thread_for_domain (domain, thread, candidate);
g_assert (!thread->root_domain_thread);
MONO_OBJECT_SETREF (thread, root_domain_thread, candidate);
}
static guint32 WINAPI start_wrapper_internal(void *data)
{
struct StartInfo *start_info=(struct StartInfo *)data;
guint32 (*start_func)(void *);
void *start_arg;
gsize tid;
/*
* We don't create a local to hold start_info->obj, so hopefully it won't get pinned during a
* GC stack walk.
*/
MonoInternalThread *internal = start_info->obj->internal_thread;
MonoObject *start_delegate = start_info->delegate;
MonoDomain *domain = start_info->obj->obj.vtable->domain;
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Start wrapper", __func__, GetCurrentThreadId ()));
/* We can be sure start_info->obj->tid and
* start_info->obj->handle have been set, because the thread
* was created suspended, and these values were set before the
* thread resumed
*/
tid=internal->tid;
SET_CURRENT_OBJECT (internal);
mono_monitor_init_tls ();
/* Every thread references the appdomain which created it */
mono_thread_push_appdomain_ref (domain);
if (!mono_domain_set (domain, FALSE)) {
/* No point in raising an appdomain_unloaded exception here */
/* FIXME: Cleanup here */
mono_thread_pop_appdomain_ref ();
return 0;
}
start_func = start_info->func;
start_arg = start_info->start_arg;
/* We have to do this here because mono_thread_new_init()
requires that root_domain_thread is set up. */
thread_adjust_static_data (internal);
init_root_domain_thread (internal, start_info->obj);
/* This MUST be called before any managed code can be
* executed, as it calls the callback function that (for the
* jit) sets the lmf marker.
*/
mono_thread_new_init (tid, &tid, start_func);
internal->stack_ptr = &tid;
LIBGC_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT",%d) Setting thread stack to %p", __func__, GetCurrentThreadId (), getpid (), internal->stack_ptr));
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Setting current_object_key to %p", __func__, GetCurrentThreadId (), internal));
/* On 2.0 profile (and higher), set explicitly since state might have been
Unknown */
if (internal->apartment_state == ThreadApartmentState_Unknown)
internal->apartment_state = ThreadApartmentState_MTA;
mono_thread_init_apartment_state ();
if(internal->start_notify!=NULL) {
/* Let the thread that called Start() know we're
* ready
*/
ReleaseSemaphore (internal->start_notify, 1, NULL);
}
mono_threads_lock ();
mono_g_hash_table_remove (thread_start_args, start_info->obj);
mono_threads_unlock ();
mono_thread_set_execution_context (start_info->obj->ec_to_set);
start_info->obj->ec_to_set = NULL;
g_free (start_info);
THREAD_DEBUG (g_message ("%s: start_wrapper for %"G_GSIZE_FORMAT, __func__,
internal->tid));
/*
* Call this after calling start_notify, since the profiler callback might want
* to lock the thread, and the lock is held by thread_start () which waits for
* start_notify.
*/
mono_profiler_thread_start (tid);
/* start_func is set only for unmanaged start functions */
if (start_func) {
start_func (start_arg);
} else {
void *args [1];
g_assert (start_delegate != NULL);
args [0] = start_arg;
/* we may want to handle the exception here. See comment below on unhandled exceptions */
mono_runtime_delegate_invoke (start_delegate, args, NULL);
}
/* If the thread calls ExitThread at all, this remaining code
* will not be executed, but the main thread will eventually
* call thread_cleanup() on this thread's behalf.
*/
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Start wrapper terminating", __func__, GetCurrentThreadId ()));
thread_cleanup (internal);
/* Do any cleanup needed for apartment state. This
* cannot be done in thread_cleanup since thread_cleanup could be
* called for a thread other than the current thread.
* mono_thread_cleanup_apartment_state cleans up apartment
* for the current thead */
mono_thread_cleanup_apartment_state ();
/* Remove the reference to the thread object in the TLS data,
* so the thread object can be finalized. This won't be
* reached if the thread threw an uncaught exception, so those
* thread handles will stay referenced :-( (This is due to
* missing support for scanning thread-specific data in the
* Boehm GC - the io-layer keeps a GC-visible hash of pointers
* to TLS data.)
*/
SET_CURRENT_OBJECT (NULL);
mono_domain_unset ();
return(0);
}
static guint32 WINAPI start_wrapper(void *data)
{
#ifdef HAVE_SGEN_GC
volatile int dummy;
/* Avoid scanning the frames above this frame during a GC */
mono_gc_set_stack_end ((void*)&dummy);
#endif
return start_wrapper_internal (data);
}
void mono_thread_new_init (intptr_t tid, gpointer stack_start, gpointer func)
{
if (mono_thread_start_cb) {
mono_thread_start_cb (tid, stack_start, func);
}
}
void mono_threads_set_default_stacksize (guint32 stacksize)
{
default_stacksize = stacksize;
}
guint32 mono_threads_get_default_stacksize (void)
{
return default_stacksize;
}
/*
* mono_create_thread:
*
* This is a wrapper around CreateThread which handles differences in the type of
* the 'tid' argument.
*/
gpointer mono_create_thread (WapiSecurityAttributes *security,
guint32 stacksize, WapiThreadStart start,
gpointer param, guint32 create, gsize *tid)
{
gpointer res;
#ifdef HOST_WIN32
DWORD real_tid;
res = CreateThread (security, stacksize, start, param, create, &real_tid);
if (tid)
*tid = real_tid;
#else
res = CreateThread (security, stacksize, start, param, create, tid);
#endif
return res;
}
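/* Illustrative call site (not part of the runtime): the gsize 'tid'
* out-parameter is what makes this work unchanged on both 32 bit and
* 64 bit hosts. my_start and my_arg are hypothetical.
*/
#if 0
static void example_create (void)
{
gsize tid;
gpointer handle = mono_create_thread (NULL, 0, my_start, my_arg,
CREATE_SUSPENDED, &tid);
if (handle != NULL)
ResumeThread (handle);
}
#endif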
/*
* The thread start argument may be an object reference, and there is
* no ref to keep it alive when the new thread is started but not yet
* registered with the collector. So we store it in a GC tracked hash
* table.
*
* LOCKING: Assumes the threads lock is held.
*/
static void
register_thread_start_argument (MonoThread *thread, struct StartInfo *start_info)
{
if (thread_start_args == NULL) {
MONO_GC_REGISTER_ROOT_FIXED (thread_start_args);
thread_start_args = mono_g_hash_table_new (NULL, NULL);
}
mono_g_hash_table_insert (thread_start_args, thread, start_info->start_arg);
}
MonoInternalThread* mono_thread_create_internal (MonoDomain *domain, gpointer func, gpointer arg, gboolean threadpool_thread, guint32 stack_size)
{
MonoThread *thread;
MonoInternalThread *internal;
HANDLE thread_handle;
struct StartInfo *start_info;
gsize tid;
thread = create_thread_object (domain);
internal = create_internal_thread_object ();
MONO_OBJECT_SETREF (thread, internal_thread, internal);
start_info=g_new0 (struct StartInfo, 1);
start_info->func = func;
start_info->obj = thread;
start_info->start_arg = arg;
mono_threads_lock ();
if (shutting_down) {
mono_threads_unlock ();
g_free (start_info);
return NULL;
}
if (threads_starting_up == NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads_starting_up);
threads_starting_up = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_VALUE_GC);
}
register_thread_start_argument (thread, start_info);
mono_g_hash_table_insert (threads_starting_up, thread, thread);
mono_threads_unlock ();
if (stack_size == 0)
stack_size = default_stacksize_for_thread (internal);
/* Create suspended, so we can do some housekeeping before the thread
* starts
*/
thread_handle = mono_create_thread (NULL, stack_size, (LPTHREAD_START_ROUTINE)start_wrapper, start_info,
CREATE_SUSPENDED, &tid);
THREAD_DEBUG (g_message ("%s: Started thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread_handle));
if (thread_handle == NULL) {
/* The thread couldn't be created, so throw an exception */
mono_threads_lock ();
mono_g_hash_table_remove (threads_starting_up, thread);
mono_threads_unlock ();
g_free (start_info);
mono_raise_exception (mono_get_exception_execution_engine ("Couldn't create thread"));
return NULL;
}
internal->handle=thread_handle;
internal->tid=tid;
internal->apartment_state=ThreadApartmentState_Unknown;
internal->small_id = mono_thread_small_id_alloc ();
internal->thread_pinning_ref = internal;
MONO_GC_REGISTER_ROOT (internal->thread_pinning_ref);
internal->synch_cs = g_new0 (CRITICAL_SECTION, 1);
InitializeCriticalSection (internal->synch_cs);
internal->threadpool_thread = threadpool_thread;
if (threadpool_thread)
mono_thread_set_state (internal, ThreadState_Background);
if (handle_store (thread))
ResumeThread (thread_handle);
return internal;
}
void
mono_thread_create (MonoDomain *domain, gpointer func, gpointer arg)
{
mono_thread_create_internal (domain, func, arg, FALSE, 0);
}
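/* Embedder usage sketch (illustrative; worker_main is hypothetical):
*/
#if 0
static guint32 worker_main (void *arg)
{
/* runs with TLS, appdomain refs and GC tracking already set up */
return 0;
}
static void spawn_worker (void)
{
mono_thread_create (mono_get_root_domain (), worker_main, NULL);
}
#endif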
/*
* mono_thread_get_stack_bounds:
*
* Return the address and size of the current thread's stack. Return NULL as the
* stack address if the stack address cannot be determined.
*/
void
mono_thread_get_stack_bounds (guint8 **staddr, size_t *stsize)
{
#if defined(HAVE_PTHREAD_GET_STACKSIZE_NP) && defined(HAVE_PTHREAD_GET_STACKADDR_NP)
*staddr = (guint8*)pthread_get_stackaddr_np (pthread_self ());
*stsize = pthread_get_stacksize_np (pthread_self ());
*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
return;
/* FIXME: simplify the mess below */
#elif !defined(HOST_WIN32)
pthread_attr_t attr;
guint8 *current = (guint8*)&attr;
pthread_attr_init (&attr);
# ifdef HAVE_PTHREAD_GETATTR_NP
pthread_getattr_np (pthread_self(), &attr);
# else
# ifdef HAVE_PTHREAD_ATTR_GET_NP
pthread_attr_get_np (pthread_self(), &attr);
# elif defined(sun)
*staddr = NULL;
pthread_attr_getstacksize (&attr, stsize);
# elif defined(__OpenBSD__)
stack_t ss;
int rslt;
rslt = pthread_stackseg_np(pthread_self(), &ss);
g_assert (rslt == 0);
*staddr = (guint8*)((size_t)ss.ss_sp - ss.ss_size);
*stsize = ss.ss_size;
# else
*staddr = NULL;
*stsize = 0;
return;
# endif
# endif
# if !defined(sun)
# if !defined(__OpenBSD__)
pthread_attr_getstack (&attr, (void**)staddr, stsize);
# endif
if (*staddr)
g_assert ((current > *staddr) && (current < *staddr + *stsize));
# endif
pthread_attr_destroy (&attr);
#else
*staddr = NULL;
*stsize = (size_t)-1;
#endif
/* When running under emacs, sometimes staddr is not aligned to a page size */
*staddr = (guint8*)((gssize)*staddr & ~(mono_pagesize () - 1));
}
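/* Usage sketch (illustrative): callers must handle the NULL stack
* address case, as mono_thread_attach () does below.
*/
#if 0
static void example_stack_bounds (void)
{
guint8 *staddr;
size_t stsize;
mono_thread_get_stack_bounds (&staddr, &stsize);
if (staddr != NULL)
g_message ("stack runs from %p to %p", staddr, staddr + stsize);
}
#endif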
MonoThread *
mono_thread_attach (MonoDomain *domain)
{
MonoInternalThread *thread;
MonoThread *current_thread;
HANDLE thread_handle;
gsize tid;
if ((thread = mono_thread_internal_current ())) {
if (domain != mono_domain_get ())
mono_domain_set (domain, TRUE);
/* Already attached */
return mono_thread_current ();
}
if (!mono_gc_register_thread (&domain)) {
g_error ("Thread %"G_GSIZE_FORMAT" calling into managed code is not registered with the GC. On UNIX, this can be fixed by #include-ing <gc.h> before <pthread.h> in the file containing the thread creation code.", GetCurrentThreadId ());
}
thread = create_internal_thread_object ();
thread_handle = GetCurrentThread ();
g_assert (thread_handle);
tid=GetCurrentThreadId ();
/*
* The handle returned by GetCurrentThread () is a pseudo handle, so it can't be used to
* refer to the thread from other threads for things like aborting.
*/
DuplicateHandle (GetCurrentProcess (), thread_handle, GetCurrentProcess (), &thread_handle,
THREAD_ALL_ACCESS, TRUE, 0);
thread->handle=thread_handle;
thread->tid=tid;
#ifdef PLATFORM_ANDROID
thread->android_tid = (gpointer) gettid ();
#endif
thread->apartment_state=ThreadApartmentState_Unknown;
thread->small_id = mono_thread_small_id_alloc ();
thread->thread_pinning_ref = thread;
MONO_GC_REGISTER_ROOT (thread->thread_pinning_ref);
thread->stack_ptr = &tid;
thread->synch_cs = g_new0 (CRITICAL_SECTION, 1);
InitializeCriticalSection (thread->synch_cs);
THREAD_DEBUG (g_message ("%s: Attached thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread_handle));
current_thread = new_thread_with_internal (domain, thread);
if (!handle_store (current_thread)) {
/* Mono is shutting down, so just wait for the end */
for (;;)
Sleep (10000);
}
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Setting current_object_key to %p", __func__, GetCurrentThreadId (), thread));
SET_CURRENT_OBJECT (thread);
mono_domain_set (domain, TRUE);
mono_monitor_init_tls ();
thread_adjust_static_data (thread);
init_root_domain_thread (thread, current_thread);
if (domain != mono_get_root_domain ())
set_current_thread_for_domain (domain, thread, current_thread);
if (mono_thread_attach_cb) {
guint8 *staddr;
size_t stsize;
mono_thread_get_stack_bounds (&staddr, &stsize);
if (staddr == NULL)
mono_thread_attach_cb (tid, &tid);
else
mono_thread_attach_cb (tid, staddr + stsize);
}
// FIXME: Need a separate callback
mono_profiler_thread_start (tid);
return current_thread;
}
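/* Embedding sketch (illustrative): a native thread must attach to the
* runtime before touching managed objects, and should detach before it
* exits.
*/
#if 0
static void* native_thread_main (void *arg)
{
MonoThread *thread = mono_thread_attach (mono_get_root_domain ());
/* ... call into managed code ... */
mono_thread_detach (thread);
return NULL;
}
#endif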
void
mono_thread_detach (MonoThread *thread)
{
g_return_if_fail (thread != NULL);
THREAD_DEBUG (g_message ("%s: mono_thread_detach for %p (%"G_GSIZE_FORMAT")", __func__, thread, (gsize)thread->internal_thread->tid));
thread_cleanup (thread->internal_thread);
SET_CURRENT_OBJECT (NULL);
mono_domain_unset ();
/* Don't need to CloseHandle this thread, even though we took a
* reference in mono_thread_attach (), because the GC will do it
* when the Thread object is finalised.
*/
}
void
mono_thread_exit ()
{
MonoInternalThread *thread = mono_thread_internal_current ();
THREAD_DEBUG (g_message ("%s: mono_thread_exit for %p (%"G_GSIZE_FORMAT")", __func__, thread, (gsize)thread->tid));
thread_cleanup (thread);
SET_CURRENT_OBJECT (NULL);
mono_domain_unset ();
/* we could add a callback here for embedders to use. */
if (mono_thread_get_main () && (thread == mono_thread_get_main ()->internal_thread))
exit (mono_environment_exitcode_get ());
ExitThread (-1);
}
void
ves_icall_System_Threading_Thread_ConstructInternalThread (MonoThread *this)
{
MonoInternalThread *internal = create_internal_thread_object ();
internal->state = ThreadState_Unstarted;
internal->apartment_state = ThreadApartmentState_Unknown;
InterlockedCompareExchangePointer ((gpointer *)&this->internal_thread, internal, NULL);
}
HANDLE ves_icall_System_Threading_Thread_Thread_internal(MonoThread *this,
MonoObject *start)
{
guint32 (*start_func)(void *);
struct StartInfo *start_info;
HANDLE thread;
gsize tid;
MonoInternalThread *internal;
THREAD_DEBUG (g_message("%s: Trying to start a new thread: this (%p) start (%p)", __func__, this, start));
if (!this->internal_thread)
ves_icall_System_Threading_Thread_ConstructInternalThread (this);
internal = this->internal_thread;
ensure_synch_cs_set (internal);
EnterCriticalSection (internal->synch_cs);
if ((internal->state & ThreadState_Unstarted) == 0) {
LeaveCriticalSection (internal->synch_cs);
mono_raise_exception (mono_get_exception_thread_state ("Thread has already been started."));
return NULL;
}
internal->small_id = -1;
if ((internal->state & ThreadState_Aborted) != 0) {
LeaveCriticalSection (internal->synch_cs);
return this;
}
start_func = NULL;
{
/* This is freed in start_wrapper */
start_info = g_new0 (struct StartInfo, 1);
start_info->func = start_func;
start_info->start_arg = this->start_obj; /* FIXME: GC object stored in unmanaged memory */
start_info->delegate = start;
start_info->obj = this;
g_assert (this->obj.vtable->domain == mono_domain_get ());
internal->start_notify=CreateSemaphore (NULL, 0, 0x7fffffff, NULL);
if (internal->start_notify==NULL) {
LeaveCriticalSection (internal->synch_cs);
g_warning ("%s: CreateSemaphore error 0x%x", __func__, GetLastError ());
g_free (start_info);
return(NULL);
}
mono_threads_lock ();
register_thread_start_argument (this, start_info);
if (threads_starting_up == NULL) {
MONO_GC_REGISTER_ROOT_FIXED (threads_starting_up);
threads_starting_up = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_KEY_VALUE_GC);
}
mono_g_hash_table_insert (threads_starting_up, this, this);
mono_threads_unlock ();
thread=mono_create_thread(NULL, default_stacksize_for_thread (internal), (LPTHREAD_START_ROUTINE)start_wrapper, start_info,
CREATE_SUSPENDED, &tid);
if(thread==NULL) {
LeaveCriticalSection (internal->synch_cs);
mono_threads_lock ();
mono_g_hash_table_remove (threads_starting_up, this);
mono_threads_unlock ();
g_warning("%s: CreateThread error 0x%x", __func__, GetLastError());
return(NULL);
}
internal->handle=thread;
internal->tid=tid;
internal->small_id = mono_thread_small_id_alloc ();
internal->thread_pinning_ref = internal;
MONO_GC_REGISTER_ROOT (internal->thread_pinning_ref);
/* Don't call handle_store() here, delay it to Start.
* We can't join a thread (trying to do so would just block
* forever) until it actually starts running, so don't
* store the handle till then.
*/
mono_thread_start (this);
internal->state &= ~ThreadState_Unstarted;
THREAD_DEBUG (g_message ("%s: Started thread ID %"G_GSIZE_FORMAT" (handle %p)", __func__, tid, thread));
LeaveCriticalSection (internal->synch_cs);
return(thread);
}
}
void ves_icall_System_Threading_InternalThread_Thread_free_internal (MonoInternalThread *this, HANDLE thread)
{
MONO_ARCH_SAVE_REGS;
THREAD_DEBUG (g_message ("%s: Closing thread %p, handle %p", __func__, this, thread));
if (thread)
CloseHandle (thread);
if (this->synch_cs) {
DeleteCriticalSection (this->synch_cs);
g_free (this->synch_cs);
this->synch_cs = NULL;
}
g_free (this->name);
}
static void mono_thread_start (MonoThread *thread)
{
MonoInternalThread *internal = thread->internal_thread;
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Launching thread %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid));
/* Only store the handle when the thread is about to be
* launched, to avoid the main thread deadlocking while trying
* to clean up a thread that will never be signalled.
*/
if (!handle_store (thread))
return;
ResumeThread (internal->handle);
if(internal->start_notify!=NULL) {
/* Wait for the thread to set up its TLS data etc, so
* there's no potential race condition if someone tries
* to look up the data believing the thread has
* started
*/
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") waiting for thread %p (%"G_GSIZE_FORMAT") to start", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid));
WaitForSingleObjectEx (internal->start_notify, INFINITE, FALSE);
CloseHandle (internal->start_notify);
internal->start_notify = NULL;
}
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Done launching thread %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), internal, (gsize)internal->tid));
}
void ves_icall_System_Threading_Thread_Sleep_internal(gint32 ms)
{
guint32 res;
MonoInternalThread *thread = mono_thread_internal_current ();
THREAD_DEBUG (g_message ("%s: Sleeping for %d ms", __func__, ms));
mono_thread_current_check_pending_interrupt ();
while (TRUE) {
mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
res = SleepEx(ms,TRUE);
mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
if (res == WAIT_IO_COMPLETION) { /* we might have been interrupted */
MonoException* exc = mono_thread_execute_interruption (thread);
if (exc) {
mono_raise_exception (exc);
} else {
// FIXME: !INFINITE
if (ms != INFINITE)
break;
}
} else {
break;
}
}
}
void ves_icall_System_Threading_Thread_SpinWait_nop (void)
{
}
gint32
ves_icall_System_Threading_Thread_GetDomainID (void)
{
MONO_ARCH_SAVE_REGS;
return mono_domain_get()->domain_id;
}
gboolean
ves_icall_System_Threading_Thread_Yield (void)
{
#ifdef HOST_WIN32
return SwitchToThread ();
#else
return sched_yield () == 0;
#endif
}
/*
* mono_thread_get_name:
*
* Return the name of the thread. NAME_LEN is set to the length of the name.
* Return NULL if the thread has no name. The returned memory is owned by the
* caller.
*/
gunichar2*
mono_thread_get_name (MonoInternalThread *this_obj, guint32 *name_len)
{
gunichar2 *res;
ensure_synch_cs_set (this_obj);
EnterCriticalSection (this_obj->synch_cs);
if (!this_obj->name) {
*name_len = 0;
res = NULL;
} else {
*name_len = this_obj->name_len;
res = g_new (gunichar2, this_obj->name_len);
memcpy (res, this_obj->name, sizeof (gunichar2) * this_obj->name_len);
}
LeaveCriticalSection (this_obj->synch_cs);
return res;
}
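/* Caller-owned-memory usage sketch (illustrative):
*/
#if 0
static void example_get_name (MonoInternalThread *thread)
{
guint32 name_len;
gunichar2 *name = mono_thread_get_name (thread, &name_len);
if (name != NULL) {
/* ... use the name_len UTF-16 code units ... */
g_free (name);
}
}
#endif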
MonoString*
ves_icall_System_Threading_Thread_GetName_internal (MonoInternalThread *this_obj)
{
MonoString* str;
ensure_synch_cs_set (this_obj);
EnterCriticalSection (this_obj->synch_cs);
if (!this_obj->name)
str = NULL;
else
str = mono_string_new_utf16 (mono_domain_get (), this_obj->name, this_obj->name_len);
LeaveCriticalSection (this_obj->synch_cs);
return str;
}
void
ves_icall_System_Threading_Thread_SetName_internal (MonoInternalThread *this_obj, MonoString *name)
{
ensure_synch_cs_set (this_obj);
EnterCriticalSection (this_obj->synch_cs);
if (this_obj->name) {
LeaveCriticalSection (this_obj->synch_cs);
mono_raise_exception (mono_get_exception_invalid_operation ("Thread.Name can only be set once."));
return;
}
if (name) {
this_obj->name = g_new (gunichar2, mono_string_length (name));
memcpy (this_obj->name, mono_string_chars (name), mono_string_length (name) * 2);
this_obj->name_len = mono_string_length (name);
}
else
this_obj->name = NULL;
LeaveCriticalSection (this_obj->synch_cs);
if (this_obj->name) {
char *tname = mono_string_to_utf8 (name);
mono_profiler_thread_name (this_obj->tid, tname);
mono_free (tname);
}
}
/* If the array is already in the requested domain, we just return it,
otherwise we return a copy in that domain. */
static MonoArray*
byte_array_to_domain (MonoArray *arr, MonoDomain *domain)
{
MonoArray *copy;
if (!arr)
return NULL;
if (mono_object_domain (arr) == domain)
return arr;
copy = mono_array_new (domain, mono_defaults.byte_class, arr->max_length);
memcpy (mono_array_addr (copy, guint8, 0), mono_array_addr (arr, guint8, 0), arr->max_length);
return copy;
}
MonoArray*
ves_icall_System_Threading_Thread_ByteArrayToRootDomain (MonoArray *arr)
{
return byte_array_to_domain (arr, mono_get_root_domain ());
}
MonoArray*
ves_icall_System_Threading_Thread_ByteArrayToCurrentDomain (MonoArray *arr)
{
return byte_array_to_domain (arr, mono_domain_get ());
}
MonoThread *
mono_thread_current (void)
{
MonoDomain *domain = mono_domain_get ();
MonoInternalThread *internal = mono_thread_internal_current ();
MonoThread **current_thread_ptr;
g_assert (internal);
current_thread_ptr = get_current_thread_ptr_for_domain (domain, internal);
if (!*current_thread_ptr) {
g_assert (domain != mono_get_root_domain ());
*current_thread_ptr = new_thread_with_internal (domain, internal);
}
return *current_thread_ptr;
}
MonoInternalThread*
mono_thread_internal_current (void)
{
MonoInternalThread *res = GET_CURRENT_OBJECT ();
THREAD_DEBUG (g_message ("%s: returning %p", __func__, res));
return res;
}
gboolean ves_icall_System_Threading_Thread_Join_internal(MonoInternalThread *this,
int ms, HANDLE thread)
{
MonoInternalThread *cur_thread = mono_thread_internal_current ();
gboolean ret;
mono_thread_current_check_pending_interrupt ();
ensure_synch_cs_set (this);
EnterCriticalSection (this->synch_cs);
if ((this->state & ThreadState_Unstarted) != 0) {
LeaveCriticalSection (this->synch_cs);
mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started."));
return FALSE;
}
LeaveCriticalSection (this->synch_cs);
if(ms== -1) {
ms=INFINITE;
}
THREAD_DEBUG (g_message ("%s: joining thread handle %p, %d ms", __func__, thread, ms));
mono_thread_set_state (cur_thread, ThreadState_WaitSleepJoin);
ret=WaitForSingleObjectEx (thread, ms, TRUE);
mono_thread_clr_state (cur_thread, ThreadState_WaitSleepJoin);
if(ret==WAIT_OBJECT_0) {
THREAD_DEBUG (g_message ("%s: join successful", __func__));
return(TRUE);
}
THREAD_DEBUG (g_message ("%s: join failed", __func__));
return(FALSE);
}
/* FIXME: exitContext isn't documented */
gboolean ves_icall_System_Threading_WaitHandle_WaitAll_internal(MonoArray *mono_handles, gint32 ms, gboolean exitContext)
{
HANDLE *handles;
guint32 numhandles;
guint32 ret;
guint32 i;
MonoObject *waitHandle;
MonoInternalThread *thread = mono_thread_internal_current ();
/* Do this WaitSleepJoin check before creating objects */
mono_thread_current_check_pending_interrupt ();
numhandles = mono_array_length(mono_handles);
handles = g_new0(HANDLE, numhandles);
for(i = 0; i < numhandles; i++) {
waitHandle = mono_array_get(mono_handles, MonoObject*, i);
handles [i] = mono_wait_handle_get_handle ((MonoWaitHandle *) waitHandle);
}
if(ms== -1) {
ms=INFINITE;
}
mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
ret=WaitForMultipleObjectsEx(numhandles, handles, TRUE, ms, TRUE);
mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
g_free(handles);
if(ret==WAIT_FAILED) {
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait failed", __func__, GetCurrentThreadId ()));
return(FALSE);
} else if(ret==WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION) {
/* Do we want to try again if we get
* WAIT_IO_COMPLETION? The documentation for
* WaitHandle doesn't give any clues. (We'd have to
* fiddle with the timeout if we retry.)
*/
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait timed out", __func__, GetCurrentThreadId ()));
return(FALSE);
}
return(TRUE);
}
/* FIXME: exitContext isn't documented */
gint32 ves_icall_System_Threading_WaitHandle_WaitAny_internal(MonoArray *mono_handles, gint32 ms, gboolean exitContext)
{
HANDLE handles [MAXIMUM_WAIT_OBJECTS];
guint32 numhandles;
guint32 ret;
guint32 i;
MonoObject *waitHandle;
MonoInternalThread *thread = mono_thread_internal_current ();
guint32 start;
/* Do this WaitSleepJoin check before creating objects */
mono_thread_current_check_pending_interrupt ();
numhandles = mono_array_length(mono_handles);
if (numhandles > MAXIMUM_WAIT_OBJECTS)
return WAIT_FAILED;
for(i = 0; i < numhandles; i++) {
waitHandle = mono_array_get(mono_handles, MonoObject*, i);
handles [i] = mono_wait_handle_get_handle ((MonoWaitHandle *) waitHandle);
}
if(ms== -1) {
ms=INFINITE;
}
mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
start = (ms == -1) ? 0 : mono_msec_ticks ();
do {
ret = WaitForMultipleObjectsEx (numhandles, handles, FALSE, ms, TRUE);
if (ret != WAIT_IO_COMPLETION)
break;
if (ms != -1) {
guint32 diff;
diff = mono_msec_ticks () - start;
ms -= diff;
if (ms <= 0)
break;
}
} while (ms == -1 || ms > 0);
mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") returning %d", __func__, GetCurrentThreadId (), ret));
/*
* These need to be here. See the MSDN docs on WaitForMultipleObjects.
*/
if (ret >= WAIT_OBJECT_0 && ret <= WAIT_OBJECT_0 + numhandles - 1) {
return ret - WAIT_OBJECT_0;
}
else if (ret >= WAIT_ABANDONED_0 && ret <= WAIT_ABANDONED_0 + numhandles - 1) {
return ret - WAIT_ABANDONED_0;
}
else {
return ret;
}
}
/* FIXME: exitContext isn't documented */
gboolean ves_icall_System_Threading_WaitHandle_WaitOne_internal(MonoObject *this, HANDLE handle, gint32 ms, gboolean exitContext)
{
guint32 ret;
MonoInternalThread *thread = mono_thread_internal_current ();
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") waiting for %p, %d ms", __func__, GetCurrentThreadId (), handle, ms));
if(ms== -1) {
ms=INFINITE;
}
mono_thread_current_check_pending_interrupt ();
mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
ret=WaitForSingleObjectEx (handle, ms, TRUE);
mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
if(ret==WAIT_FAILED) {
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait failed", __func__, GetCurrentThreadId ()));
return(FALSE);
} else if(ret==WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION) {
/* Do we want to try again if we get
* WAIT_IO_COMPLETION? The documentation for
* WaitHandle doesn't give any clues. (We'd have to
* fiddle with the timeout if we retry.)
*/
THREAD_WAIT_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Wait timed out", __func__, GetCurrentThreadId ()));
return(FALSE);
}
return(TRUE);
}
gboolean
ves_icall_System_Threading_WaitHandle_SignalAndWait_Internal (HANDLE toSignal, HANDLE toWait, gint32 ms, gboolean exitContext)
{
guint32 ret;
MonoInternalThread *thread = mono_thread_internal_current ();
MONO_ARCH_SAVE_REGS;
if (ms == -1)
ms = INFINITE;
mono_thread_current_check_pending_interrupt ();
mono_thread_set_state (thread, ThreadState_WaitSleepJoin);
ret = SignalObjectAndWait (toSignal, toWait, ms, TRUE);
mono_thread_clr_state (thread, ThreadState_WaitSleepJoin);
return (!(ret == WAIT_TIMEOUT || ret == WAIT_IO_COMPLETION || ret == WAIT_FAILED));
}
HANDLE ves_icall_System_Threading_Mutex_CreateMutex_internal (MonoBoolean owned, MonoString *name, MonoBoolean *created)
{
HANDLE mutex;
MONO_ARCH_SAVE_REGS;
*created = TRUE;
if (name == NULL) {
mutex = CreateMutex (NULL, owned, NULL);
} else {
mutex = CreateMutex (NULL, owned, mono_string_chars (name));
if (GetLastError () == ERROR_ALREADY_EXISTS) {
*created = FALSE;
}
}
return(mutex);
}
MonoBoolean ves_icall_System_Threading_Mutex_ReleaseMutex_internal (HANDLE handle ) {
MONO_ARCH_SAVE_REGS;
return(ReleaseMutex (handle));
}
HANDLE ves_icall_System_Threading_Mutex_OpenMutex_internal (MonoString *name,
gint32 rights,
gint32 *error)
{
HANDLE ret;
MONO_ARCH_SAVE_REGS;
*error = ERROR_SUCCESS;
ret = OpenMutex (rights, FALSE, mono_string_chars (name));
if (ret == NULL) {
*error = GetLastError ();
}
return(ret);
}
HANDLE ves_icall_System_Threading_Semaphore_CreateSemaphore_internal (gint32 initialCount, gint32 maximumCount, MonoString *name, MonoBoolean *created)
{
HANDLE sem;
MONO_ARCH_SAVE_REGS;
*created = TRUE;
if (name == NULL) {
sem = CreateSemaphore (NULL, initialCount, maximumCount, NULL);
} else {
sem = CreateSemaphore (NULL, initialCount, maximumCount,
mono_string_chars (name));
if (GetLastError () == ERROR_ALREADY_EXISTS) {
*created = FALSE;
}
}
return(sem);
}
gint32 ves_icall_System_Threading_Semaphore_ReleaseSemaphore_internal (HANDLE handle, gint32 releaseCount, MonoBoolean *fail)
{
gint32 prevcount;
MONO_ARCH_SAVE_REGS;
*fail = !ReleaseSemaphore (handle, releaseCount, &prevcount);
return (prevcount);
}
HANDLE ves_icall_System_Threading_Semaphore_OpenSemaphore_internal (MonoString *name, gint32 rights, gint32 *error)
{
HANDLE ret;
MONO_ARCH_SAVE_REGS;
*error = ERROR_SUCCESS;
ret = OpenSemaphore (rights, FALSE, mono_string_chars (name));
if (ret == NULL) {
*error = GetLastError ();
}
return(ret);
}
HANDLE ves_icall_System_Threading_Events_CreateEvent_internal (MonoBoolean manual, MonoBoolean initial, MonoString *name, MonoBoolean *created)
{
HANDLE event;
MONO_ARCH_SAVE_REGS;
*created = TRUE;
if (name == NULL) {
event = CreateEvent (NULL, manual, initial, NULL);
} else {
event = CreateEvent (NULL, manual, initial,
mono_string_chars (name));
if (GetLastError () == ERROR_ALREADY_EXISTS) {
*created = FALSE;
}
}
return(event);
}
gboolean ves_icall_System_Threading_Events_SetEvent_internal (HANDLE handle) {
MONO_ARCH_SAVE_REGS;
return (SetEvent(handle));
}
gboolean ves_icall_System_Threading_Events_ResetEvent_internal (HANDLE handle) {
MONO_ARCH_SAVE_REGS;
return (ResetEvent(handle));
}
void
ves_icall_System_Threading_Events_CloseEvent_internal (HANDLE handle) {
MONO_ARCH_SAVE_REGS;
CloseHandle (handle);
}
HANDLE ves_icall_System_Threading_Events_OpenEvent_internal (MonoString *name,
gint32 rights,
gint32 *error)
{
HANDLE ret;
MONO_ARCH_SAVE_REGS;
*error = ERROR_SUCCESS;
ret = OpenEvent (rights, FALSE, mono_string_chars (name));
if (ret == NULL) {
*error = GetLastError ();
}
return(ret);
}
gint32 ves_icall_System_Threading_Interlocked_Increment_Int (gint32 *location)
{
MONO_ARCH_SAVE_REGS;
return InterlockedIncrement (location);
}
gint64 ves_icall_System_Threading_Interlocked_Increment_Long (gint64 *location)
{
gint64 ret;
MONO_ARCH_SAVE_REGS;
mono_interlocked_lock ();
ret = ++ *location;
mono_interlocked_unlock ();
return ret;
}
gint32 ves_icall_System_Threading_Interlocked_Decrement_Int (gint32 *location)
{
MONO_ARCH_SAVE_REGS;
return InterlockedDecrement(location);
}
gint64 ves_icall_System_Threading_Interlocked_Decrement_Long (gint64 * location)
{
gint64 ret;
MONO_ARCH_SAVE_REGS;
mono_interlocked_lock ();
ret = -- *location;
mono_interlocked_unlock ();
return ret;
}
gint32 ves_icall_System_Threading_Interlocked_Exchange_Int (gint32 *location, gint32 value)
{
MONO_ARCH_SAVE_REGS;
return InterlockedExchange(location, value);
}
MonoObject * ves_icall_System_Threading_Interlocked_Exchange_Object (MonoObject **location, MonoObject *value)
{
MonoObject *res;
res = (MonoObject *) InterlockedExchangePointer((gpointer *) location, value);
mono_gc_wbarrier_generic_nostore (location);
return res;
}
gpointer ves_icall_System_Threading_Interlocked_Exchange_IntPtr (gpointer *location, gpointer value)
{
return InterlockedExchangePointer(location, value);
}
gfloat ves_icall_System_Threading_Interlocked_Exchange_Single (gfloat *location, gfloat value)
{
IntFloatUnion val, ret;
MONO_ARCH_SAVE_REGS;
val.fval = value;
ret.ival = InterlockedExchange((gint32 *) location, val.ival);
return ret.fval;
}
gint64
ves_icall_System_Threading_Interlocked_Exchange_Long (gint64 *location, gint64 value)
{
#if SIZEOF_VOID_P == 8
return (gint64) InterlockedExchangePointer((gpointer *) location, (gpointer)value);
#else
gint64 res;
/*
* According to MSDN, this function is only atomic with regards to the
* other Interlocked functions on 32 bit platforms.
*/
mono_interlocked_lock ();
res = *location;
*location = value;
mono_interlocked_unlock ();
return res;
#endif
}
gdouble
ves_icall_System_Threading_Interlocked_Exchange_Double (gdouble *location, gdouble value)
{
#if SIZEOF_VOID_P == 8
LongDoubleUnion val, ret;
val.fval = value;
ret.ival = (gint64)InterlockedExchangePointer((gpointer *) location, (gpointer)val.ival);
return ret.fval;
#else
gdouble res;
/*
* According to MSDN, this function is only atomic with regards to the
* other Interlocked functions on 32 bit platforms.
*/
mono_interlocked_lock ();
res = *location;
*location = value;
mono_interlocked_unlock ();
return res;
#endif
}
gint32 ves_icall_System_Threading_Interlocked_CompareExchange_Int(gint32 *location, gint32 value, gint32 comparand)
{
MONO_ARCH_SAVE_REGS;
return InterlockedCompareExchange(location, value, comparand);
}
MonoObject * ves_icall_System_Threading_Interlocked_CompareExchange_Object (MonoObject **location, MonoObject *value, MonoObject *comparand)
{
MonoObject *res;
res = (MonoObject *) InterlockedCompareExchangePointer((gpointer *) location, value, comparand);
mono_gc_wbarrier_generic_nostore (location);
return res;
}
gpointer ves_icall_System_Threading_Interlocked_CompareExchange_IntPtr(gpointer *location, gpointer value, gpointer comparand)
{
return InterlockedCompareExchangePointer(location, value, comparand);
}
gfloat ves_icall_System_Threading_Interlocked_CompareExchange_Single (gfloat *location, gfloat value, gfloat comparand)
{
IntFloatUnion val, ret, cmp;
MONO_ARCH_SAVE_REGS;
val.fval = value;
cmp.fval = comparand;
ret.ival = InterlockedCompareExchange((gint32 *) location, val.ival, cmp.ival);
return ret.fval;
}
gdouble
ves_icall_System_Threading_Interlocked_CompareExchange_Double (gdouble *location, gdouble value, gdouble comparand)
{
#if SIZEOF_VOID_P == 8
LongDoubleUnion val, comp, ret;
val.fval = value;
comp.fval = comparand;
ret.ival = (gint64)InterlockedCompareExchangePointer((gpointer *) location, (gpointer)val.ival, (gpointer)comp.ival);
return ret.fval;
#else
gdouble old;
mono_interlocked_lock ();
old = *location;
if (old == comparand)
*location = value;
mono_interlocked_unlock ();
return old;
#endif
}
gint64
ves_icall_System_Threading_Interlocked_CompareExchange_Long (gint64 *location, gint64 value, gint64 comparand)
{
#if SIZEOF_VOID_P == 8
return (gint64)InterlockedCompareExchangePointer((gpointer *) location, (gpointer)value, (gpointer)comparand);
#else
gint64 old;
mono_interlocked_lock ();
old = *location;
if (old == comparand)
*location = value;
mono_interlocked_unlock ();
return old;
#endif
}
MonoObject*
ves_icall_System_Threading_Interlocked_CompareExchange_T (MonoObject **location, MonoObject *value, MonoObject *comparand)
{
MonoObject *res;
res = InterlockedCompareExchangePointer ((gpointer *)location, value, comparand);
mono_gc_wbarrier_generic_nostore (location);
return res;
}
MonoObject*
ves_icall_System_Threading_Interlocked_Exchange_T (MonoObject **location, MonoObject *value)
{
MonoObject *res;
res = InterlockedExchangePointer ((gpointer *)location, value);
mono_gc_wbarrier_generic_nostore (location);
return res;
}
gint32
ves_icall_System_Threading_Interlocked_Add_Int (gint32 *location, gint32 value)
{
#if SIZEOF_VOID_P == 8
/* Should be implemented as a JIT intrinsic */
mono_raise_exception (mono_get_exception_not_implemented (NULL));
return 0;
#else
gint32 orig;
mono_interlocked_lock ();
orig = *location;
*location = orig + value;
mono_interlocked_unlock ();
return orig + value;
#endif
}
gint64
ves_icall_System_Threading_Interlocked_Add_Long (gint64 *location, gint64 value)
{
#if SIZEOF_VOID_P == 8
/* Should be implemented as a JIT intrinsic */
mono_raise_exception (mono_get_exception_not_implemented (NULL));
return 0;
#else
gint64 orig;
mono_interlocked_lock ();
orig = *location;
*location = orig + value;
mono_interlocked_unlock ();
return orig + value;
#endif
}
gint64
ves_icall_System_Threading_Interlocked_Read_Long (gint64 *location)
{
#if SIZEOF_VOID_P == 8
/* 64 bit reads are already atomic */
return *location;
#else
gint64 res;
mono_interlocked_lock ();
res = *location;
mono_interlocked_unlock ();
return res;
#endif
}
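/* The barrier below is implemented by taking and releasing the global
* threads lock: the acquire/release pair keeps any access issued before
* the call from being reordered past it, and vice versa, which is all
* this icall needs to guarantee.
*/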
void
ves_icall_System_Threading_Thread_MemoryBarrier (void)
{
mono_threads_lock ();
mono_threads_unlock ();
}
void
ves_icall_System_Threading_Thread_ClrState (MonoInternalThread* this, guint32 state)
{
mono_thread_clr_state (this, state);
if (state & ThreadState_Background) {
/* If the thread changes the background mode, the main thread has to
* be notified, since it has to rebuild the list of threads to
* wait for.
*/
SetEvent (background_change_event);
}
}
void
ves_icall_System_Threading_Thread_SetState (MonoInternalThread* this, guint32 state)
{
mono_thread_set_state (this, state);
if (state & ThreadState_Background) {
/* If the thread changes the background mode, the main thread has to
* be notified, since it has to rebuild the list of threads to
* wait for.
*/
SetEvent (background_change_event);
}
}
guint32
ves_icall_System_Threading_Thread_GetState (MonoInternalThread* this)
{
guint32 state;
ensure_synch_cs_set (this);
EnterCriticalSection (this->synch_cs);
state = this->state;
LeaveCriticalSection (this->synch_cs);
return state;
}
void ves_icall_System_Threading_Thread_Interrupt_internal (MonoInternalThread *this)
{
gboolean throw = FALSE;
ensure_synch_cs_set (this);
if (this == mono_thread_internal_current ())
return;
EnterCriticalSection (this->synch_cs);
this->thread_interrupt_requested = TRUE;
if (this->state & ThreadState_WaitSleepJoin) {
throw = TRUE;
}
LeaveCriticalSection (this->synch_cs);
if (throw) {
signal_thread_state_change (this);
}
}
void mono_thread_current_check_pending_interrupt ()
{
MonoInternalThread *thread = mono_thread_internal_current ();
gboolean throw = FALSE;
mono_debugger_check_interruption ();
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if (thread->thread_interrupt_requested) {
throw = TRUE;
thread->thread_interrupt_requested = FALSE;
}
LeaveCriticalSection (thread->synch_cs);
if (throw) {
mono_raise_exception (mono_get_exception_thread_interrupted ());
}
}
int
mono_thread_get_abort_signal (void)
{
#ifdef HOST_WIN32
return -1;
#else
#ifndef SIGRTMIN
#ifdef SIGUSR1
return SIGUSR1;
#else
return -1;
#endif
#else
static int abort_signum = -1;
int i;
if (abort_signum != -1)
return abort_signum;
/* we try to avoid SIGRTMIN and any one that might have been set already, see bug #75387 */
for (i = SIGRTMIN + 1; i < SIGRTMAX; ++i) {
struct sigaction sinfo;
sigaction (i, NULL, &sinfo);
if (sinfo.sa_handler == SIG_DFL && (void*)sinfo.sa_sigaction == (void*)SIG_DFL) {
abort_signum = i;
return i;
}
}
/* fallback to the old way */
return SIGRTMIN;
#endif
#endif /* HOST_WIN32 */
}
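/* Illustrative handler installation (not part of the runtime, which
* installs its own handler elsewhere; my_abort_handler is hypothetical):
*/
#if 0
static void example_install_abort_handler (void)
{
struct sigaction sa;
memset (&sa, 0, sizeof (sa));
sa.sa_handler = my_abort_handler;
sigaction (mono_thread_get_abort_signal (), &sa, NULL);
}
#endif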
#ifdef HOST_WIN32
static void CALLBACK interruption_request_apc (ULONG_PTR param)
{
MonoException* exc = mono_thread_request_interruption (FALSE);
if (exc) mono_raise_exception (exc);
}
#endif /* HOST_WIN32 */
/*
* signal_thread_state_change
*
* Tells the thread that its state has changed and that it has to enter the new
* state as soon as possible.
*/
static void signal_thread_state_change (MonoInternalThread *thread)
{
if (thread == mono_thread_internal_current ()) {
/* Do it synchronously */
MonoException *exc = mono_thread_request_interruption (FALSE);
if (exc)
mono_raise_exception (exc);
}
#ifdef HOST_WIN32
QueueUserAPC ((PAPCFUNC)interruption_request_apc, thread->handle, (ULONG_PTR)NULL);
#else
/* fixme: store the state somewhere */
mono_thread_kill (thread, mono_thread_get_abort_signal ());
/*
* This will cause waits to be broken.
* It will also prevent the thread from entering a wait, so if the thread returns
* from the wait before it receives the abort signal, it will just spin in the wait
* functions in the io-layer until the signal handler calls QueueUserAPC which will
* make it return.
*/
wapi_interrupt_thread (thread->handle);
#endif /* HOST_WIN32 */
}
void
ves_icall_System_Threading_Thread_Abort (MonoInternalThread *thread, MonoObject *state)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & ThreadState_AbortRequested) != 0 ||
(thread->state & ThreadState_StopRequested) != 0 ||
(thread->state & ThreadState_Stopped) != 0)
{
LeaveCriticalSection (thread->synch_cs);
return;
}
if ((thread->state & ThreadState_Unstarted) != 0) {
thread->state |= ThreadState_Aborted;
LeaveCriticalSection (thread->synch_cs);
return;
}
thread->state |= ThreadState_AbortRequested;
if (thread->abort_state_handle)
mono_gchandle_free (thread->abort_state_handle);
if (state) {
thread->abort_state_handle = mono_gchandle_new (state, FALSE);
g_assert (thread->abort_state_handle);
} else {
thread->abort_state_handle = 0;
}
thread->abort_exc = NULL;
/*
* abort_exc is set in mono_thread_execute_interruption(),
* triggered by the call to signal_thread_state_change(),
* below. There's a point between where we have
* abort_state_handle set, but abort_exc NULL, but that's not
* a problem.
*/
LeaveCriticalSection (thread->synch_cs);
THREAD_DEBUG (g_message ("%s: (%"G_GSIZE_FORMAT") Abort requested for %p (%"G_GSIZE_FORMAT")", __func__, GetCurrentThreadId (), thread, (gsize)thread->tid));
/* During shutdown, we can't wait for other threads */
if (!shutting_down)
/* Make sure the thread is awake */
mono_thread_resume (thread);
signal_thread_state_change (thread);
}
void
ves_icall_System_Threading_Thread_ResetAbort (void)
{
MonoInternalThread *thread = mono_thread_internal_current ();
gboolean was_aborting;
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
was_aborting = thread->state & ThreadState_AbortRequested;
thread->state &= ~ThreadState_AbortRequested;
LeaveCriticalSection (thread->synch_cs);
if (!was_aborting) {
const char *msg = "Unable to reset abort because no abort was requested";
mono_raise_exception (mono_get_exception_thread_state (msg));
}
thread->abort_exc = NULL;
if (thread->abort_state_handle) {
mono_gchandle_free (thread->abort_state_handle);
/* This is actually not necessary - the handle
only counts if the exception is set */
thread->abort_state_handle = 0;
}
}
void
mono_thread_internal_reset_abort (MonoInternalThread *thread)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
thread->state &= ~ThreadState_AbortRequested;
if (thread->abort_exc) {
thread->abort_exc = NULL;
if (thread->abort_state_handle) {
mono_gchandle_free (thread->abort_state_handle);
/* This is actually not necessary - the handle
only counts if the exception is set */
thread->abort_state_handle = 0;
}
}
LeaveCriticalSection (thread->synch_cs);
}
MonoObject*
ves_icall_System_Threading_Thread_GetAbortExceptionState (MonoThread *this)
{
MonoInternalThread *thread = this->internal_thread;
MonoObject *state, *deserialized = NULL, *exc;
MonoDomain *domain;
if (!thread->abort_state_handle)
return NULL;
state = mono_gchandle_get_target (thread->abort_state_handle);
g_assert (state);
domain = mono_domain_get ();
if (mono_object_domain (state) == domain)
return state;
deserialized = mono_object_xdomain_representation (state, domain, &exc);
if (!deserialized) {
MonoException *invalid_op_exc = mono_get_exception_invalid_operation ("Thread.ExceptionState cannot access an ExceptionState from a different AppDomain");
if (exc)
MONO_OBJECT_SETREF (invalid_op_exc, inner_ex, exc);
mono_raise_exception (invalid_op_exc);
}
return deserialized;
}
static gboolean
mono_thread_suspend (MonoInternalThread *thread)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & ThreadState_Unstarted) != 0 ||
(thread->state & ThreadState_Aborted) != 0 ||
(thread->state & ThreadState_Stopped) != 0)
{
LeaveCriticalSection (thread->synch_cs);
return FALSE;
}
if ((thread->state & ThreadState_Suspended) != 0 ||
(thread->state & ThreadState_SuspendRequested) != 0 ||
(thread->state & ThreadState_StopRequested) != 0)
{
LeaveCriticalSection (thread->synch_cs);
return TRUE;
}
thread->state |= ThreadState_SuspendRequested;
LeaveCriticalSection (thread->synch_cs);
signal_thread_state_change (thread);
return TRUE;
}
void
ves_icall_System_Threading_Thread_Suspend (MonoInternalThread *thread)
{
if (!mono_thread_suspend (thread))
mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started, or is dead."));
}
static gboolean
mono_thread_resume (MonoInternalThread *thread)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & ThreadState_SuspendRequested) != 0) {
thread->state &= ~ThreadState_SuspendRequested;
LeaveCriticalSection (thread->synch_cs);
return TRUE;
}
if ((thread->state & ThreadState_Suspended) == 0 ||
(thread->state & ThreadState_Unstarted) != 0 ||
(thread->state & ThreadState_Aborted) != 0 ||
(thread->state & ThreadState_Stopped) != 0)
{
LeaveCriticalSection (thread->synch_cs);
return FALSE;
}
thread->resume_event = CreateEvent (NULL, TRUE, FALSE, NULL);
if (thread->resume_event == NULL) {
LeaveCriticalSection (thread->synch_cs);
return(FALSE);
}
/* Awake the thread */
SetEvent (thread->suspend_event);
LeaveCriticalSection (thread->synch_cs);
/* Wait for the thread to awake */
WaitForSingleObject (thread->resume_event, INFINITE);
CloseHandle (thread->resume_event);
thread->resume_event = NULL;
return TRUE;
}
void
ves_icall_System_Threading_Thread_Resume (MonoThread *thread)
{
if (!thread->internal_thread || !mono_thread_resume (thread->internal_thread))
mono_raise_exception (mono_get_exception_thread_state ("Thread has not been started, or is dead."));
}
static gboolean
find_wrapper (MonoMethod *m, gint no, gint ilo, gboolean managed, gpointer data)
{
if (managed)
return TRUE;
if (m->wrapper_type == MONO_WRAPPER_RUNTIME_INVOKE ||
m->wrapper_type == MONO_WRAPPER_XDOMAIN_INVOKE ||
m->wrapper_type == MONO_WRAPPER_XDOMAIN_DISPATCH)
{
*((gboolean*)data) = TRUE;
return TRUE;
}
return FALSE;
}
static gboolean
is_running_protected_wrapper (void)
{
gboolean found = FALSE;
mono_stack_walk (find_wrapper, &found);
return found;
}
void mono_thread_internal_stop (MonoInternalThread *thread)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & ThreadState_StopRequested) != 0 ||
(thread->state & ThreadState_Stopped) != 0)
{
LeaveCriticalSection (thread->synch_cs);
return;
}
/* Make sure the thread is awake */
mono_thread_resume (thread);
thread->state |= ThreadState_StopRequested;
thread->state &= ~ThreadState_AbortRequested;
LeaveCriticalSection (thread->synch_cs);
signal_thread_state_change (thread);
}
void mono_thread_stop (MonoThread *thread)
{
mono_thread_internal_stop (thread->internal_thread);
}
gint8
ves_icall_System_Threading_Thread_VolatileRead1 (void *ptr)
{
return *((volatile gint8 *) (ptr));
}
gint16
ves_icall_System_Threading_Thread_VolatileRead2 (void *ptr)
{
return *((volatile gint16 *) (ptr));
}
gint32
ves_icall_System_Threading_Thread_VolatileRead4 (void *ptr)
{
return *((volatile gint32 *) (ptr));
}
gint64
ves_icall_System_Threading_Thread_VolatileRead8 (void *ptr)
{
return *((volatile gint64 *) (ptr));
}
void *
ves_icall_System_Threading_Thread_VolatileReadIntPtr (void *ptr)
{
return (void *) *((volatile void **) ptr);
}
void
ves_icall_System_Threading_Thread_VolatileWrite1 (void *ptr, gint8 value)
{
*((volatile gint8 *) ptr) = value;
}
void
ves_icall_System_Threading_Thread_VolatileWrite2 (void *ptr, gint16 value)
{
*((volatile gint16 *) ptr) = value;
}
void
ves_icall_System_Threading_Thread_VolatileWrite4 (void *ptr, gint32 value)
{
*((volatile gint32 *) ptr) = value;
}
void
ves_icall_System_Threading_Thread_VolatileWrite8 (void *ptr, gint64 value)
{
*((volatile gint64 *) ptr) = value;
}
void
ves_icall_System_Threading_Thread_VolatileWriteIntPtr (void *ptr, void *value)
{
*((volatile void **) ptr) = value;
}
void
ves_icall_System_Threading_Thread_VolatileWriteObject (void *ptr, void *value)
{
mono_gc_wbarrier_generic_store (ptr, value);
}
void mono_thread_init (MonoThreadStartCB start_cb,
MonoThreadAttachCB attach_cb)
{
mono_thread_smr_init ();
InitializeCriticalSection(&threads_mutex);
InitializeCriticalSection(&interlocked_mutex);
InitializeCriticalSection(&contexts_mutex);
background_change_event = CreateEvent (NULL, TRUE, FALSE, NULL);
g_assert(background_change_event != NULL);
mono_init_static_data_info (&thread_static_info);
mono_init_static_data_info (&context_static_info);
MONO_FAST_TLS_INIT (tls_current_object);
current_object_key=TlsAlloc();
THREAD_DEBUG (g_message ("%s: Allocated current_object_key %d", __func__, current_object_key));
mono_thread_start_cb = start_cb;
mono_thread_attach_cb = attach_cb;
/* Get a pseudo handle to the current process. This is just a
* kludge so that wapi can build a process handle if needed.
* As a pseudo handle is returned, we don't need to clean
* anything up.
*/
GetCurrentProcess ();
}
void mono_thread_cleanup (void)
{
#if !defined(HOST_WIN32) && !defined(RUN_IN_SUBTHREAD)
/* The main thread must abandon any held mutexes (particularly
* important for named mutexes as they are shared across
* processes, see bug 74680.) This will happen when the
* thread exits, but if it's not running in a subthread it
* won't exit in time.
*/
/* Using non-w32 API is a nasty kludge, but I couldn't find
* anything in the documentation that would let me do this
* here yet still be safe to call on windows.
*/
_wapi_thread_signal_self (mono_environment_exitcode_get ());
#endif
#if 0
/* This stuff needs more testing, it seems one of these
* critical sections can be locked when mono_thread_cleanup is
* called.
*/
DeleteCriticalSection (&threads_mutex);
DeleteCriticalSection (&interlocked_mutex);
DeleteCriticalSection (&contexts_mutex);
DeleteCriticalSection (&delayed_free_table_mutex);
DeleteCriticalSection (&small_id_mutex);
CloseHandle (background_change_event);
#endif
TlsFree (current_object_key);
}
void
mono_threads_install_cleanup (MonoThreadCleanupFunc func)
{
mono_thread_cleanup_fn = func;
}
void
mono_thread_set_manage_callback (MonoThread *thread, MonoThreadManageCallback func)
{
thread->internal_thread->manage_callback = func;
}
void mono_threads_install_notify_pending_exc (MonoThreadNotifyPendingExcFunc func)
{
mono_thread_notify_pending_exc_fn = func;
}
G_GNUC_UNUSED
static void print_tids (gpointer key, gpointer value, gpointer user)
{
/* GPOINTER_TO_UINT breaks horribly if sizeof(void *) >
* sizeof(uint) and a cast to uint would overflow
*/
/* Older versions of glib don't have G_GSIZE_FORMAT, so just
* print this as a pointer.
*/
g_message ("Waiting for: %p", key);
}
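/* Shutdown waits on threads in batches of at most MAXIMUM_WAIT_OBJECTS
* handles, since that is the WaitForMultipleObjects limit; wait_data
* carries one such batch.
*/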
struct wait_data
{
HANDLE handles[MAXIMUM_WAIT_OBJECTS];
MonoInternalThread *threads[MAXIMUM_WAIT_OBJECTS];
guint32 num;
};
static void wait_for_tids (struct wait_data *wait, guint32 timeout)
{
guint32 i, ret;
THREAD_DEBUG (g_message("%s: %d threads to wait for in this batch", __func__, wait->num));
ret=WaitForMultipleObjectsEx(wait->num, wait->handles, TRUE, timeout, TRUE);
if(ret==WAIT_FAILED) {
/* See the comment in build_wait_tids() */
THREAD_DEBUG (g_message ("%s: Wait failed", __func__));
return;
}
for(i=0; i<wait->num; i++)
CloseHandle (wait->handles[i]);
if (ret == WAIT_TIMEOUT)
return;
for(i=0; i<wait->num; i++) {
gsize tid = wait->threads[i]->tid;
mono_threads_lock ();
if(mono_g_hash_table_lookup (threads, (gpointer)tid)!=NULL) {
/* This thread must have been killed, because
* it hasn't cleaned itself up. (It's just
* possible that the thread exited before the
* parent thread had a chance to store the
* handle, and now there is another pointer to
* the already-exited thread stored. In this
* case, we'll just get two
* mono_profiler_thread_end() calls for the
* same thread.)
*/
mono_threads_unlock ();
THREAD_DEBUG (g_message ("%s: cleaning up after thread %p (%"G_GSIZE_FORMAT")", __func__, wait->threads[i], tid));
thread_cleanup (wait->threads[i]);
} else {
mono_threads_unlock ();
}
}
}
static void wait_for_tids_or_state_change (struct wait_data *wait, guint32 timeout)
{
guint32 i, ret, count;
THREAD_DEBUG (g_message("%s: %d threads to wait for in this batch", __func__, wait->num));
/* Add the thread state change event, so it wakes up if a thread changes
* to background mode.
*/
count = wait->num;
if (count < MAXIMUM_WAIT_OBJECTS) {
wait->handles [count] = background_change_event;
count++;
}
ret=WaitForMultipleObjectsEx (count, wait->handles, FALSE, timeout, TRUE);
if(ret==WAIT_FAILED) {
/* See the comment in build_wait_tids() */
THREAD_DEBUG (g_message ("%s: Wait failed", __func__));
return;
}
for(i=0; i<wait->num; i++)
CloseHandle (wait->handles[i]);
if (ret == WAIT_TIMEOUT)
return;
if (ret < wait->num) {
gsize tid = wait->threads[ret]->tid;
mono_threads_lock ();
if (mono_g_hash_table_lookup (threads, (gpointer)tid)!=NULL) {
/* See comment in wait_for_tids about thread cleanup */
mono_threads_unlock ();
THREAD_DEBUG (g_message ("%s: cleaning up after thread %"G_GSIZE_FORMAT, __func__, tid));
thread_cleanup (wait->threads [ret]);
} else
mono_threads_unlock ();
}
}
static void build_wait_tids (gpointer key, gpointer value, gpointer user)
{
struct wait_data *wait=(struct wait_data *)user;
if(wait->num<MAXIMUM_WAIT_OBJECTS) {
HANDLE handle;
MonoInternalThread *thread=(MonoInternalThread *)value;
/* Ignore background threads, we abort them later */
/* Do not lock here since it is not needed and the caller holds threads_lock */
if (thread->state & ThreadState_Background) {
THREAD_DEBUG (g_message ("%s: ignoring background thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
return; /* just leave, ignore */
}
if (mono_gc_is_finalizer_internal_thread (thread)) {
THREAD_DEBUG (g_message ("%s: ignoring finalizer thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
return;
}
if (thread == mono_thread_internal_current ()) {
THREAD_DEBUG (g_message ("%s: ignoring current thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
return;
}
if (mono_thread_get_main () && (thread == mono_thread_get_main ()->internal_thread)) {
THREAD_DEBUG (g_message ("%s: ignoring main thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
return;
}
if (thread->flags & MONO_THREAD_FLAG_DONT_MANAGE) {
THREAD_DEBUG (g_message ("%s: ignoring thread %" G_GSIZE_FORMAT "with DONT_MANAGE flag set.", __func__, (gsize)thread->tid));
return;
}
handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid);
if (handle == NULL) {
THREAD_DEBUG (g_message ("%s: ignoring unopenable thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
return;
}
THREAD_DEBUG (g_message ("%s: Invoking mono_thread_manage callback on thread %p", __func__, thread));
if ((thread->manage_callback == NULL) || (thread->manage_callback (thread->root_domain_thread) == TRUE)) {
wait->handles[wait->num]=handle;
wait->threads[wait->num]=thread;
wait->num++;
THREAD_DEBUG (g_message ("%s: adding thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
} else {
THREAD_DEBUG (g_message ("%s: ignoring (because of callback) thread %"G_GSIZE_FORMAT, __func__, (gsize)thread->tid));
}
} else {
/* Just ignore the rest, we can't do anything with
* them yet
*/
}
}
static gboolean
remove_and_abort_threads (gpointer key, gpointer value, gpointer user)
{
struct wait_data *wait=(struct wait_data *)user;
gsize self = GetCurrentThreadId ();
MonoInternalThread *thread = value;
HANDLE handle;
if (wait->num >= MAXIMUM_WAIT_OBJECTS)
return FALSE;
/* The finalizer thread is not a background thread */
if (thread->tid != self && (thread->state & ThreadState_Background) != 0 &&
!(thread->flags & MONO_THREAD_FLAG_DONT_MANAGE)) {
handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid);
if (handle == NULL)
return FALSE;
/* printf ("A: %d\n", wait->num); */
wait->handles[wait->num]=thread->handle;
wait->threads[wait->num]=thread;
wait->num++;
THREAD_DEBUG (g_print ("%s: Aborting id: %"G_GSIZE_FORMAT"\n", __func__, (gsize)thread->tid));
mono_thread_internal_stop (thread);
return TRUE;
}
return (thread->tid != self && !mono_gc_is_finalizer_internal_thread (thread));
}
/**
* mono_threads_set_shutting_down:
*
 * Called by a thread that wants to shut down Mono. If the runtime is already
 * shutting down, the calling thread is suspended/stopped, and this function
 * never returns.
*/
void
mono_threads_set_shutting_down (void)
{
MonoInternalThread *current_thread = mono_thread_internal_current ();
mono_threads_lock ();
if (shutting_down) {
mono_threads_unlock ();
/* Make sure we're properly suspended/stopped */
EnterCriticalSection (current_thread->synch_cs);
if ((current_thread->state & ThreadState_SuspendRequested) ||
(current_thread->state & ThreadState_AbortRequested) ||
(current_thread->state & ThreadState_StopRequested)) {
LeaveCriticalSection (current_thread->synch_cs);
mono_thread_execute_interruption (current_thread);
} else {
current_thread->state |= ThreadState_Stopped;
LeaveCriticalSection (current_thread->synch_cs);
}
/*since we're killing the thread, unset the current domain.*/
mono_domain_unset ();
/* Wake up other threads potentially waiting for us */
ExitThread (0);
} else {
shutting_down = TRUE;
/* Not really a background state change, but this will
* interrupt the main thread if it is waiting for all
* the other threads.
*/
SetEvent (background_change_event);
mono_threads_unlock ();
}
}
/**
* mono_threads_is_shutting_down:
*
* Returns whether a thread has commenced shutdown of Mono. Note that
* if the function returns FALSE the caller must not assume that
* shutdown is not in progress, because the situation might have
* changed since the function returned. For that reason this function
* is of very limited utility.
*/
gboolean
mono_threads_is_shutting_down (void)
{
return shutting_down;
}
void mono_thread_manage (void)
{
struct wait_data wait_data;
struct wait_data *wait = &wait_data;
memset (wait, 0, sizeof (struct wait_data));
/* join each thread that's still running */
THREAD_DEBUG (g_message ("%s: Joining each running thread...", __func__));
mono_threads_lock ();
if(threads==NULL) {
THREAD_DEBUG (g_message("%s: No threads", __func__));
mono_threads_unlock ();
return;
}
mono_threads_unlock ();
do {
mono_threads_lock ();
if (shutting_down) {
/* somebody else is shutting down */
mono_threads_unlock ();
break;
}
THREAD_DEBUG (g_message ("%s: There are %d threads to join", __func__, mono_g_hash_table_size (threads));
mono_g_hash_table_foreach (threads, print_tids, NULL));
ResetEvent (background_change_event);
wait->num=0;
/*We must zero all InternalThread pointers to avoid making the GC unhappy.*/
memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P);
mono_g_hash_table_foreach (threads, build_wait_tids, wait);
mono_threads_unlock ();
if(wait->num>0) {
/* Something to wait for */
wait_for_tids_or_state_change (wait, INFINITE);
}
THREAD_DEBUG (g_message ("%s: I have %d threads after waiting.", __func__, wait->num));
} while(wait->num>0);
mono_threads_set_shutting_down ();
/* No new threads will be created after this point */
mono_runtime_set_shutting_down ();
THREAD_DEBUG (g_message ("%s: threadpool cleanup", __func__));
mono_thread_pool_cleanup ();
/*
* Remove everything but the finalizer thread and self.
 * Also abort all the background threads.
 */
do {
mono_threads_lock ();
wait->num = 0;
/*We must zero all InternalThread pointers to avoid making the GC unhappy.*/
memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P);
mono_g_hash_table_foreach_remove (threads, remove_and_abort_threads, wait);
mono_threads_unlock ();
THREAD_DEBUG (g_message ("%s: wait->num is now %d", __func__, wait->num));
if(wait->num>0) {
/* Something to wait for */
wait_for_tids (wait, INFINITE);
}
} while (wait->num > 0);
/*
* give the subthreads a chance to really quit (this is mainly needed
* to get correct user and system times from getrusage/wait/time(1)).
* This could be removed if we avoid pthread_detach() and use pthread_join().
*/
#ifndef HOST_WIN32
sched_yield ();
#endif
}
static void terminate_thread (gpointer key, gpointer value, gpointer user)
{
MonoInternalThread *thread=(MonoInternalThread *)value;
if(thread->tid != (gsize)user) {
/*TerminateThread (thread->handle, -1);*/
}
}
void mono_thread_abort_all_other_threads (void)
{
gsize self = GetCurrentThreadId ();
mono_threads_lock ();
THREAD_DEBUG (g_message ("%s: There are %d threads to abort", __func__,
mono_g_hash_table_size (threads));
mono_g_hash_table_foreach (threads, print_tids, NULL));
mono_g_hash_table_foreach (threads, terminate_thread, (gpointer)self);
mono_threads_unlock ();
}
static void
collect_threads_for_suspend (gpointer key, gpointer value, gpointer user_data)
{
MonoInternalThread *thread = (MonoInternalThread*)value;
struct wait_data *wait = (struct wait_data*)user_data;
HANDLE handle;
/*
* We try to exclude threads early, to avoid running into the MAXIMUM_WAIT_OBJECTS
* limitation.
* This needs no locking.
*/
if ((thread->state & ThreadState_Suspended) != 0 ||
(thread->state & ThreadState_Stopped) != 0)
return;
if (wait->num<MAXIMUM_WAIT_OBJECTS) {
handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid);
if (handle == NULL)
return;
wait->handles [wait->num] = handle;
wait->threads [wait->num] = thread;
wait->num++;
}
}
/*
* mono_thread_suspend_all_other_threads:
*
* Suspend all managed threads except the finalizer thread and this thread. It is
* not possible to resume them later.
*/
void mono_thread_suspend_all_other_threads (void)
{
struct wait_data wait_data;
struct wait_data *wait = &wait_data;
int i;
gsize self = GetCurrentThreadId ();
gpointer *events;
guint32 eventidx = 0;
gboolean starting, finished;
memset (wait, 0, sizeof (struct wait_data));
/*
* The other threads could be in an arbitrary state at this point, i.e.
* they could be starting up, shutting down etc. This means that there could be
* threads which are not even in the threads hash table yet.
*/
/*
* First we set a barrier which will be checked by all threads before they
* are added to the threads hash table, and they will exit if the flag is set.
* This ensures that no threads could be added to the hash later.
* We will use shutting_down as the barrier for now.
*/
g_assert (shutting_down);
/*
* We make multiple calls to WaitForMultipleObjects since:
* - we can only wait for MAXIMUM_WAIT_OBJECTS threads
* - some threads could exit without becoming suspended
*/
finished = FALSE;
while (!finished) {
/*
* Make a copy of the hashtable since we can't do anything with
* threads while threads_mutex is held.
*/
wait->num = 0;
/*We must zero all InternalThread pointers to avoid making the GC unhappy.*/
memset (wait->threads, 0, MAXIMUM_WAIT_OBJECTS * SIZEOF_VOID_P);
mono_threads_lock ();
mono_g_hash_table_foreach (threads, collect_threads_for_suspend, wait);
mono_threads_unlock ();
events = g_new0 (gpointer, wait->num);
eventidx = 0;
/* Get the suspended events that we'll be waiting for */
for (i = 0; i < wait->num; ++i) {
MonoInternalThread *thread = wait->threads [i];
gboolean signal_suspend = FALSE;
if ((thread->tid == self) || mono_gc_is_finalizer_internal_thread (thread) || (thread->flags & MONO_THREAD_FLAG_DONT_MANAGE)) {
//CloseHandle (wait->handles [i]);
wait->threads [i] = NULL; /* ignore this thread in next loop */
continue;
}
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if (thread->suspended_event == NULL) {
thread->suspended_event = CreateEvent (NULL, TRUE, FALSE, NULL);
if (thread->suspended_event == NULL) {
/* Forget this one and go on to the next */
LeaveCriticalSection (thread->synch_cs);
continue;
}
}
if ((thread->state & ThreadState_Suspended) != 0 ||
(thread->state & ThreadState_StopRequested) != 0 ||
(thread->state & ThreadState_Stopped) != 0) {
LeaveCriticalSection (thread->synch_cs);
CloseHandle (wait->handles [i]);
wait->threads [i] = NULL; /* ignore this thread in next loop */
continue;
}
if ((thread->state & ThreadState_SuspendRequested) == 0)
signal_suspend = TRUE;
events [eventidx++] = thread->suspended_event;
/* Convert abort requests into suspend requests */
if ((thread->state & ThreadState_AbortRequested) != 0)
thread->state &= ~ThreadState_AbortRequested;
thread->state |= ThreadState_SuspendRequested;
LeaveCriticalSection (thread->synch_cs);
/* Signal the thread to suspend */
if (signal_suspend)
signal_thread_state_change (thread);
}
if (eventidx > 0) {
WaitForMultipleObjectsEx (eventidx, events, TRUE, 100, FALSE);
for (i = 0; i < wait->num; ++i) {
MonoInternalThread *thread = wait->threads [i];
if (thread == NULL)
continue;
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & ThreadState_Suspended) != 0) {
CloseHandle (thread->suspended_event);
thread->suspended_event = NULL;
}
LeaveCriticalSection (thread->synch_cs);
}
} else {
/*
* If there are threads which are starting up, we wait until they
* are suspended when they try to register in the threads hash.
* This is guaranteed to finish, since the threads which can create new
* threads get suspended after a while.
* FIXME: The finalizer thread can still create new threads.
*/
mono_threads_lock ();
if (threads_starting_up)
starting = mono_g_hash_table_size (threads_starting_up) > 0;
else
starting = FALSE;
mono_threads_unlock ();
if (starting)
Sleep (100);
else
finished = TRUE;
}
g_free (events);
}
}
static void
collect_threads (gpointer key, gpointer value, gpointer user_data)
{
MonoInternalThread *thread = (MonoInternalThread*)value;
struct wait_data *wait = (struct wait_data*)user_data;
HANDLE handle;
if (wait->num<MAXIMUM_WAIT_OBJECTS) {
handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid);
if (handle == NULL)
return;
wait->handles [wait->num] = handle;
wait->threads [wait->num] = thread;
wait->num++;
}
}
/**
* mono_threads_request_thread_dump:
*
* Ask all threads except the current to print their stacktrace to stdout.
*/
void
mono_threads_request_thread_dump (void)
{
struct wait_data wait_data;
struct wait_data *wait = &wait_data;
int i;
memset (wait, 0, sizeof (struct wait_data));
/*
* Make a copy of the hashtable since we can't do anything with
* threads while threads_mutex is held.
*/
mono_threads_lock ();
mono_g_hash_table_foreach (threads, collect_threads, wait);
mono_threads_unlock ();
for (i = 0; i < wait->num; ++i) {
MonoInternalThread *thread = wait->threads [i];
if (!mono_gc_is_finalizer_internal_thread (thread) &&
(thread != mono_thread_internal_current ()) &&
!thread->thread_dump_requested) {
thread->thread_dump_requested = TRUE;
signal_thread_state_change (thread);
}
CloseHandle (wait->handles [i]);
}
}
struct ref_stack {
gpointer *refs;
gint allocated; /* +1 so that refs [allocated] == NULL */
gint bottom;
};
typedef struct ref_stack RefStack;
static RefStack *
ref_stack_new (gint initial_size)
{
RefStack *rs;
initial_size = MAX (initial_size, 16) + 1;
rs = g_new0 (RefStack, 1);
rs->refs = g_new0 (gpointer, initial_size);
rs->allocated = initial_size;
return rs;
}
static void
ref_stack_destroy (gpointer ptr)
{
RefStack *rs = ptr;
if (rs != NULL) {
g_free (rs->refs);
g_free (rs);
}
}
static void
ref_stack_push (RefStack *rs, gpointer ptr)
{
g_assert (rs != NULL);
	if (rs->bottom >= rs->allocated) {
		gint old_size = rs->allocated;
		rs->refs = g_realloc (rs->refs, (old_size * 2 + 1) * sizeof (gpointer));
		rs->allocated <<= 1;
		/* Zero the new slots so the array stays NULL-terminated and
		 * ref_stack_find never scans uninitialized memory. */
		memset (rs->refs + old_size, 0, (old_size + 1) * sizeof (gpointer));
	}
rs->refs [rs->bottom++] = ptr;
}
static void
ref_stack_pop (RefStack *rs)
{
if (rs == NULL || rs->bottom == 0)
return;
rs->bottom--;
rs->refs [rs->bottom] = NULL;
}
static gboolean
ref_stack_find (RefStack *rs, gpointer ptr)
{
gpointer *refs;
if (rs == NULL)
return FALSE;
for (refs = rs->refs; refs && *refs; refs++) {
if (*refs == ptr)
return TRUE;
}
return FALSE;
}
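/*
 * Illustrative sketch (not part of the runtime, hence compiled out): the
 * RefStack keeps its refs array NULL-terminated, so ref_stack_find can scan
 * without consulting 'bottom'. The pointer values below are arbitrary.
 */
#if 0
static void
example_ref_stack_usage (void)
{
	RefStack *rs = ref_stack_new (4);
	ref_stack_push (rs, GUINT_TO_POINTER (0x1));
	ref_stack_push (rs, GUINT_TO_POINTER (0x2));
	g_assert (ref_stack_find (rs, GUINT_TO_POINTER (0x2)));
	ref_stack_pop (rs);
	/* the popped slot is NULLed, so the entry is no longer found */
	g_assert (!ref_stack_find (rs, GUINT_TO_POINTER (0x2)));
	ref_stack_destroy (rs);
}
#endif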
/*
* mono_thread_push_appdomain_ref:
*
* Register that the current thread may have references to objects in domain
* @domain on its stack. Each call to this function should be paired with a
* call to pop_appdomain_ref.
*/
void
mono_thread_push_appdomain_ref (MonoDomain *domain)
{
MonoInternalThread *thread = mono_thread_internal_current ();
if (thread) {
/* printf ("PUSH REF: %"G_GSIZE_FORMAT" -> %s.\n", (gsize)thread->tid, domain->friendly_name); */
SPIN_LOCK (thread->lock_thread_id);
if (thread->appdomain_refs == NULL)
thread->appdomain_refs = ref_stack_new (16);
ref_stack_push (thread->appdomain_refs, domain);
SPIN_UNLOCK (thread->lock_thread_id);
}
}
void
mono_thread_pop_appdomain_ref (void)
{
MonoInternalThread *thread = mono_thread_internal_current ();
if (thread) {
/* printf ("POP REF: %"G_GSIZE_FORMAT" -> %s.\n", (gsize)thread->tid, ((MonoDomain*)(thread->appdomain_refs->data))->friendly_name); */
SPIN_LOCK (thread->lock_thread_id);
ref_stack_pop (thread->appdomain_refs);
SPIN_UNLOCK (thread->lock_thread_id);
}
}
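/*
 * Illustrative sketch (compiled out): how the push/pop pair documented above
 * is expected to bracket code that may leave references to another domain on
 * the stack. The helper below is a hypothetical caller, not runtime code.
 */
#if 0
static void
example_call_into_domain (MonoDomain *domain)
{
	mono_thread_push_appdomain_ref (domain);
	/* ... run code that may keep references to objects in @domain ... */
	mono_thread_pop_appdomain_ref ();
}
#endif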
gboolean
mono_thread_internal_has_appdomain_ref (MonoInternalThread *thread, MonoDomain *domain)
{
gboolean res;
SPIN_LOCK (thread->lock_thread_id);
res = ref_stack_find (thread->appdomain_refs, domain);
SPIN_UNLOCK (thread->lock_thread_id);
return res;
}
gboolean
mono_thread_has_appdomain_ref (MonoThread *thread, MonoDomain *domain)
{
return mono_thread_internal_has_appdomain_ref (thread->internal_thread, domain);
}
typedef struct abort_appdomain_data {
struct wait_data wait;
MonoDomain *domain;
} abort_appdomain_data;
static void
collect_appdomain_thread (gpointer key, gpointer value, gpointer user_data)
{
MonoInternalThread *thread = (MonoInternalThread*)value;
abort_appdomain_data *data = (abort_appdomain_data*)user_data;
MonoDomain *domain = data->domain;
if (mono_thread_internal_has_appdomain_ref (thread, domain)) {
/* printf ("ABORTING THREAD %p BECAUSE IT REFERENCES DOMAIN %s.\n", thread->tid, domain->friendly_name); */
if(data->wait.num<MAXIMUM_WAIT_OBJECTS) {
HANDLE handle = OpenThread (THREAD_ALL_ACCESS, TRUE, thread->tid);
if (handle == NULL)
return;
data->wait.handles [data->wait.num] = handle;
data->wait.threads [data->wait.num] = thread;
data->wait.num++;
} else {
/* Just ignore the rest, we can't do anything with
* them yet
*/
}
}
}
/*
* mono_threads_abort_appdomain_threads:
*
 * Abort threads which have references to the given appdomain.
*/
gboolean
mono_threads_abort_appdomain_threads (MonoDomain *domain, int timeout)
{
abort_appdomain_data user_data;
guint32 start_time;
int orig_timeout = timeout;
int i;
THREAD_DEBUG (g_message ("%s: starting abort", __func__));
start_time = mono_msec_ticks ();
do {
mono_threads_lock ();
user_data.domain = domain;
user_data.wait.num = 0;
/* This shouldn't take any locks */
mono_g_hash_table_foreach (threads, collect_appdomain_thread, &user_data);
mono_threads_unlock ();
if (user_data.wait.num > 0) {
/* Abort the threads outside the threads lock */
for (i = 0; i < user_data.wait.num; ++i)
ves_icall_System_Threading_Thread_Abort (user_data.wait.threads [i], NULL);
/*
* We should wait for the threads either to abort, or to leave the
* domain. We can't do the latter, so we wait with a timeout.
*/
wait_for_tids (&user_data.wait, 100);
}
/* Update remaining time */
timeout -= mono_msec_ticks () - start_time;
start_time = mono_msec_ticks ();
if (orig_timeout != -1 && timeout < 0)
return FALSE;
}
while (user_data.wait.num > 0);
THREAD_DEBUG (g_message ("%s: abort done", __func__));
return TRUE;
}
static void
clear_cached_culture (gpointer key, gpointer value, gpointer user_data)
{
MonoInternalThread *thread = (MonoInternalThread*)value;
MonoDomain *domain = (MonoDomain*)user_data;
int i;
/* No locking needed here */
/* FIXME: why no locking? writes to the cache are protected with synch_cs above */
if (thread->cached_culture_info) {
for (i = 0; i < NUM_CACHED_CULTURES * 2; ++i) {
MonoObject *obj = mono_array_get (thread->cached_culture_info, MonoObject*, i);
if (obj && obj->vtable->domain == domain)
mono_array_set (thread->cached_culture_info, MonoObject*, i, NULL);
}
}
}
/*
* mono_threads_clear_cached_culture:
*
* Clear the cached_current_culture from all threads if it is in the
* given appdomain.
*/
void
mono_threads_clear_cached_culture (MonoDomain *domain)
{
mono_threads_lock ();
mono_g_hash_table_foreach (threads, clear_cached_culture, domain);
mono_threads_unlock ();
}
/*
* mono_thread_get_undeniable_exception:
*
* Return an exception which needs to be raised when leaving a catch clause.
* This is used for undeniable exception propagation.
*/
MonoException*
mono_thread_get_undeniable_exception (void)
{
MonoInternalThread *thread = mono_thread_internal_current ();
if (thread && thread->abort_exc && !is_running_protected_wrapper ()) {
/*
* FIXME: Clear the abort exception and return an AppDomainUnloaded
* exception if the thread no longer references a dying appdomain.
*/
thread->abort_exc->trace_ips = NULL;
thread->abort_exc->stack_trace = NULL;
return thread->abort_exc;
}
return NULL;
}
#ifdef MONO_SMALL_CONFIG
#define NUM_STATIC_DATA_IDX 4
static const int static_data_size [NUM_STATIC_DATA_IDX] = {
64, 256, 1024, 4096
};
#else
#define NUM_STATIC_DATA_IDX 8
static const int static_data_size [NUM_STATIC_DATA_IDX] = {
1024, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216
};
#endif
static uintptr_t* static_reference_bitmaps [NUM_STATIC_DATA_IDX];
#ifdef HAVE_SGEN_GC
static void
mark_tls_slots (void *addr, MonoGCMarkFunc mark_func)
{
int i;
gpointer *static_data = addr;
for (i = 0; i < NUM_STATIC_DATA_IDX; ++i) {
int j, numwords;
void **ptr;
if (!static_data [i])
continue;
numwords = 1 + static_data_size [i] / sizeof (gpointer) / (sizeof(uintptr_t) * 8);
ptr = static_data [i];
for (j = 0; j < numwords; ++j, ptr += sizeof (uintptr_t) * 8) {
uintptr_t bmap = static_reference_bitmaps [i][j];
void ** p = ptr;
while (bmap) {
if ((bmap & 1) && *p) {
mark_func (p);
}
p++;
bmap >>= 1;
}
}
}
}
#endif
/*
* mono_alloc_static_data
*
 * Allocate memory blocks for storing thread or context static data
*/
static void
mono_alloc_static_data (gpointer **static_data_ptr, guint32 offset, gboolean threadlocal)
{
guint idx = (offset >> 24) - 1;
int i;
gpointer* static_data = *static_data_ptr;
if (!static_data) {
static void* tls_desc = NULL;
#ifdef HAVE_SGEN_GC
if (!tls_desc)
tls_desc = mono_gc_make_root_descr_user (mark_tls_slots);
#endif
static_data = mono_gc_alloc_fixed (static_data_size [0], threadlocal?tls_desc:NULL);
*static_data_ptr = static_data;
static_data [0] = static_data;
}
for (i = 1; i <= idx; ++i) {
if (static_data [i])
continue;
#ifdef HAVE_SGEN_GC
static_data [i] = threadlocal?g_malloc0 (static_data_size [i]):mono_gc_alloc_fixed (static_data_size [i], NULL);
#else
static_data [i] = mono_gc_alloc_fixed (static_data_size [i], NULL);
#endif
}
}
static void
mono_free_static_data (gpointer* static_data, gboolean threadlocal)
{
int i;
for (i = 1; i < NUM_STATIC_DATA_IDX; ++i) {
if (!static_data [i])
continue;
#ifdef HAVE_SGEN_GC
if (threadlocal)
g_free (static_data [i]);
else
mono_gc_free_fixed (static_data [i]);
#else
mono_gc_free_fixed (static_data [i]);
#endif
}
mono_gc_free_fixed (static_data);
}
/*
* mono_init_static_data_info
*
* Initializes static data counters
*/
static void mono_init_static_data_info (StaticDataInfo *static_data)
{
static_data->idx = 0;
static_data->offset = 0;
static_data->freelist = NULL;
}
/*
* mono_alloc_static_data_slot
*
* Generates an offset for static data. static_data contains the counters
* used to generate it.
*/
static guint32
mono_alloc_static_data_slot (StaticDataInfo *static_data, guint32 size, guint32 align)
{
guint32 offset;
if (!static_data->idx && !static_data->offset) {
/*
* we use the first chunk of the first allocation also as
* an array for the rest of the data
*/
static_data->offset = sizeof (gpointer) * NUM_STATIC_DATA_IDX;
}
static_data->offset += align - 1;
static_data->offset &= ~(align - 1);
if (static_data->offset + size >= static_data_size [static_data->idx]) {
static_data->idx ++;
g_assert (size <= static_data_size [static_data->idx]);
g_assert (static_data->idx < NUM_STATIC_DATA_IDX);
static_data->offset = 0;
}
offset = static_data->offset | ((static_data->idx + 1) << 24);
static_data->offset += size;
return offset;
}
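/*
 * Worked example of the slot arithmetic above (assuming the
 * non-SMALL_CONFIG chunk sizes, so chunk 0 holds 1024 bytes): with
 * idx == 0 and offset == 0x200, a request of size 8 and align 8 keeps
 * the offset at 0x200 and returns 0x200 | ((0 + 1) << 24) = 0x01000200,
 * then advances the offset to 0x208. Once offset + size would reach
 * 1024, allocation moves on to chunk 1 at offset 0 instead.
 */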
/*
 * Ensure that thread static fields already allocated are valid for this
 * thread. This function is called when a thread is created or on thread
 * attach.
*/
static void
thread_adjust_static_data (MonoInternalThread *thread)
{
guint32 offset;
mono_threads_lock ();
if (thread_static_info.offset || thread_static_info.idx > 0) {
/* get the current allocated size */
offset = thread_static_info.offset | ((thread_static_info.idx + 1) << 24);
mono_alloc_static_data (&(thread->static_data), offset, TRUE);
}
mono_threads_unlock ();
}
static void
alloc_thread_static_data_helper (gpointer key, gpointer value, gpointer user)
{
MonoInternalThread *thread = value;
guint32 offset = GPOINTER_TO_UINT (user);
mono_alloc_static_data (&(thread->static_data), offset, TRUE);
}
static MonoThreadDomainTls*
search_tls_slot_in_freelist (StaticDataInfo *static_data, guint32 size, guint32 align)
{
MonoThreadDomainTls* prev = NULL;
MonoThreadDomainTls* tmp = static_data->freelist;
while (tmp) {
if (tmp->size == size) {
if (prev)
prev->next = tmp->next;
else
static_data->freelist = tmp->next;
return tmp;
}
tmp = tmp->next;
}
return NULL;
}
static void
update_tls_reference_bitmap (guint32 offset, uintptr_t *bitmap, int max_set)
{
int i;
int idx = (offset >> 24) - 1;
uintptr_t *rb;
if (!static_reference_bitmaps [idx])
static_reference_bitmaps [idx] = g_new0 (uintptr_t, 1 + static_data_size [idx] / sizeof(gpointer) / (sizeof(uintptr_t) * 8));
rb = static_reference_bitmaps [idx];
offset &= 0xffffff;
offset /= sizeof (gpointer);
/* offset is now the bitmap offset */
for (i = 0; i < max_set; ++i) {
if (bitmap [i / sizeof (uintptr_t)] & (1L << (i & (sizeof (uintptr_t) * 8 -1))))
rb [(offset + i) / (sizeof (uintptr_t) * 8)] |= (1L << ((offset + i) & (sizeof (uintptr_t) * 8 -1)));
}
}
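/*
 * Worked example of the bitmap arithmetic above (assuming 64-bit words):
 * a reference slot at byte offset 0x40 in its chunk covers pointer index
 * 0x40 / sizeof (gpointer) == 8, so update_tls_reference_bitmap sets bit
 * 8 of word 0 in that chunk's reference bitmap; clear_reference_bitmap
 * clears the same bits when the slot is freed.
 */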
static void
clear_reference_bitmap (guint32 offset, guint32 size)
{
int idx = (offset >> 24) - 1;
uintptr_t *rb;
rb = static_reference_bitmaps [idx];
offset &= 0xffffff;
offset /= sizeof (gpointer);
size /= sizeof (gpointer);
size += offset;
/* offset is now the bitmap offset */
for (; offset < size; ++offset)
rb [offset / (sizeof (uintptr_t) * 8)] &= ~(1L << (offset & (sizeof (uintptr_t) * 8 -1)));
}
/*
* The offset for a special static variable is composed of three parts:
* a bit that indicates the type of static data (0:thread, 1:context),
* an index in the array of chunks of memory for the thread (thread->static_data)
 * and an offset in that chunk of memory. This allows allocating less memory in the
* common case.
*/
guint32
mono_alloc_special_static_data (guint32 static_type, guint32 size, guint32 align, uintptr_t *bitmap, int max_set)
{
guint32 offset;
if (static_type == SPECIAL_STATIC_THREAD) {
MonoThreadDomainTls *item;
mono_threads_lock ();
item = search_tls_slot_in_freelist (&thread_static_info, size, align);
/*g_print ("TLS alloc: %d in domain %p (total: %d), cached: %p\n", size, mono_domain_get (), thread_static_info.offset, item);*/
if (item) {
offset = item->offset;
g_free (item);
} else {
offset = mono_alloc_static_data_slot (&thread_static_info, size, align);
}
update_tls_reference_bitmap (offset, bitmap, max_set);
/* This can be called during startup */
if (threads != NULL)
mono_g_hash_table_foreach (threads, alloc_thread_static_data_helper, GUINT_TO_POINTER (offset));
mono_threads_unlock ();
} else {
g_assert (static_type == SPECIAL_STATIC_CONTEXT);
mono_contexts_lock ();
offset = mono_alloc_static_data_slot (&context_static_info, size, align);
mono_contexts_unlock ();
offset |= 0x80000000; /* Set the high bit to indicate context static data */
}
return offset;
}
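/*
 * Illustrative decoder for the offset encoding built above (compiled out;
 * the function name is a hypothetical placeholder). For a thread-static
 * slot in chunk 2 at byte 0x10: offset = 0x10 | ((2 + 1) << 24) = 0x03000010,
 * with the high bit clear; a context-static slot also sets 0x80000000.
 */
#if 0
static void
example_decode_special_static_offset (guint32 offset)
{
	gboolean is_context = (offset & 0x80000000) != 0;
	int idx = ((offset & 0x7fffffff) >> 24) - 1; /* chunk index */
	guint32 off_in_chunk = offset & 0xffffff;    /* byte offset in the chunk */
	g_print ("context:%d chunk:%d offset:0x%x\n", is_context, idx, off_in_chunk);
}
#endif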
gpointer
mono_get_special_static_data_for_thread (MonoInternalThread *thread, guint32 offset)
{
	/* The high bit means either thread (0) or context (1) static data. */
guint32 static_type = (offset & 0x80000000);
int idx;
offset &= 0x7fffffff;
idx = (offset >> 24) - 1;
if (static_type == 0) {
return get_thread_static_data (thread, offset);
} else {
		/* Allocate the static data block on demand, since we don't
		 * keep a list of contexts.
		 */
MonoAppContext *context = mono_context_get ();
if (!context->static_data || !context->static_data [idx]) {
mono_contexts_lock ();
mono_alloc_static_data (&(context->static_data), offset, FALSE);
mono_contexts_unlock ();
}
return ((char*) context->static_data [idx]) + (offset & 0xffffff);
}
}
gpointer
mono_get_special_static_data (guint32 offset)
{
return mono_get_special_static_data_for_thread (mono_thread_internal_current (), offset);
}
typedef struct {
guint32 offset;
guint32 size;
} TlsOffsetSize;
static void
free_thread_static_data_helper (gpointer key, gpointer value, gpointer user)
{
MonoInternalThread *thread = value;
TlsOffsetSize *data = user;
int idx = (data->offset >> 24) - 1;
char *ptr;
if (!thread->static_data || !thread->static_data [idx])
return;
ptr = ((char*) thread->static_data [idx]) + (data->offset & 0xffffff);
memset (ptr, 0, data->size);
}
static void
do_free_special_slot (guint32 offset, guint32 size)
{
guint32 static_type = (offset & 0x80000000);
/*g_print ("free %s , size: %d, offset: %x\n", field->name, size, offset);*/
if (static_type == 0) {
TlsOffsetSize data;
MonoThreadDomainTls *item = g_new0 (MonoThreadDomainTls, 1);
data.offset = offset & 0x7fffffff;
data.size = size;
clear_reference_bitmap (data.offset, data.size);
if (threads != NULL)
mono_g_hash_table_foreach (threads, free_thread_static_data_helper, &data);
item->offset = offset;
item->size = size;
if (!mono_runtime_is_shutting_down ()) {
item->next = thread_static_info.freelist;
thread_static_info.freelist = item;
} else {
/* We could be called during shutdown after mono_thread_cleanup () is called */
g_free (item);
}
} else {
/* FIXME: free context static data as well */
}
}
static void
do_free_special (gpointer key, gpointer value, gpointer data)
{
MonoClassField *field = key;
guint32 offset = GPOINTER_TO_UINT (value);
gint32 align;
guint32 size;
size = mono_type_size (field->type, &align);
do_free_special_slot (offset, size);
}
void
mono_alloc_special_static_data_free (GHashTable *special_static_fields)
{
mono_threads_lock ();
g_hash_table_foreach (special_static_fields, do_free_special, NULL);
mono_threads_unlock ();
}
void
mono_special_static_data_free_slot (guint32 offset, guint32 size)
{
mono_threads_lock ();
do_free_special_slot (offset, size);
mono_threads_unlock ();
}
/*
 * Allocates room in the thread-local area for storing an instance of the
 * given struct type. The allocation is tracked in domain->tlsrec_list.
*/
uint32_t
mono_thread_alloc_tls (MonoReflectionType *type)
{
MonoDomain *domain = mono_domain_get ();
MonoClass *klass;
MonoTlsDataRecord *tlsrec;
int max_set = 0;
gsize *bitmap;
gsize default_bitmap [4] = {0};
uint32_t tls_offset;
guint32 size;
gint32 align;
klass = mono_class_from_mono_type (type->type);
/* TlsDatum is a struct, so we subtract the object header size offset */
bitmap = mono_class_compute_bitmap (klass, default_bitmap, sizeof (default_bitmap) * 8, - (int)(sizeof (MonoObject) / sizeof (gpointer)), &max_set, FALSE);
size = mono_type_size (type->type, &align);
tls_offset = mono_alloc_special_static_data (SPECIAL_STATIC_THREAD, size, align, bitmap, max_set);
if (bitmap != default_bitmap)
g_free (bitmap);
tlsrec = g_new0 (MonoTlsDataRecord, 1);
tlsrec->tls_offset = tls_offset;
tlsrec->size = size;
mono_domain_lock (domain);
tlsrec->next = domain->tlsrec_list;
domain->tlsrec_list = tlsrec;
mono_domain_unlock (domain);
return tls_offset;
}
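/*
 * Illustrative pairing of the TLS alloc/destroy API (compiled out;
 * 'type' stands in for a MonoReflectionType* obtained elsewhere).
 */
#if 0
static void
example_tls_roundtrip (MonoReflectionType *type)
{
	uint32_t slot = mono_thread_alloc_tls (type);
	/* ... store/load instances through the special static data at 'slot' ... */
	mono_thread_destroy_tls (slot);
}
#endif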
void
mono_thread_destroy_tls (uint32_t tls_offset)
{
MonoTlsDataRecord *prev = NULL;
MonoTlsDataRecord *cur;
guint32 size = 0;
MonoDomain *domain = mono_domain_get ();
mono_domain_lock (domain);
cur = domain->tlsrec_list;
while (cur) {
if (cur->tls_offset == tls_offset) {
if (prev)
prev->next = cur->next;
else
domain->tlsrec_list = cur->next;
size = cur->size;
g_free (cur);
break;
}
prev = cur;
cur = cur->next;
}
mono_domain_unlock (domain);
if (size)
mono_special_static_data_free_slot (tls_offset, size);
}
/*
 * This is just to ensure cleanup: the finalizers should already have taken care of this, so it is not perf-critical.
*/
void
mono_thread_destroy_domain_tls (MonoDomain *domain)
{
while (domain->tlsrec_list)
mono_thread_destroy_tls (domain->tlsrec_list->tls_offset);
}
static MonoClassField *local_slots = NULL;
typedef struct {
/* local tls data to get locals_slot from a thread */
guint32 offset;
int idx;
/* index in the locals_slot array */
int slot;
} LocalSlotID;
static void
clear_local_slot (gpointer key, gpointer value, gpointer user_data)
{
LocalSlotID *sid = user_data;
MonoInternalThread *thread = (MonoInternalThread*)value;
MonoArray *slots_array;
/*
* the static field is stored at: ((char*) thread->static_data [idx]) + (offset & 0xffffff);
 * it is for the right domain, so we need to check whether it is allocated
 * and initialized for the current thread.
*/
/*g_print ("handling thread %p\n", thread);*/
if (!thread->static_data || !thread->static_data [sid->idx])
return;
slots_array = *(MonoArray **)(((char*) thread->static_data [sid->idx]) + (sid->offset & 0xffffff));
if (!slots_array || sid->slot >= mono_array_length (slots_array))
return;
mono_array_set (slots_array, MonoObject*, sid->slot, NULL);
}
void
mono_thread_free_local_slot_values (int slot, MonoBoolean thread_local)
{
MonoDomain *domain;
LocalSlotID sid;
sid.slot = slot;
if (thread_local) {
void *addr = NULL;
if (!local_slots) {
local_slots = mono_class_get_field_from_name (mono_defaults.thread_class, "local_slots");
if (!local_slots) {
g_warning ("local_slots field not found in Thread class");
return;
}
}
domain = mono_domain_get ();
mono_domain_lock (domain);
if (domain->special_static_fields)
addr = g_hash_table_lookup (domain->special_static_fields, local_slots);
mono_domain_unlock (domain);
if (!addr)
return;
/*g_print ("freeing slot %d at %p\n", slot, addr);*/
sid.offset = GPOINTER_TO_UINT (addr);
sid.offset &= 0x7fffffff;
sid.idx = (sid.offset >> 24) - 1;
mono_threads_lock ();
mono_g_hash_table_foreach (threads, clear_local_slot, &sid);
mono_threads_unlock ();
} else {
/* FIXME: clear the slot for MonoAppContexts, too */
}
}
#ifdef HOST_WIN32
static void CALLBACK dummy_apc (ULONG_PTR param)
{
}
#else
static guint32 dummy_apc (gpointer param)
{
return 0;
}
#endif
/*
* mono_thread_execute_interruption
*
* Performs the operation that the requested thread state requires (abort,
* suspend or stop)
*/
static MonoException* mono_thread_execute_interruption (MonoInternalThread *thread)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
/* MonoThread::interruption_requested can only be changed with atomics */
if (InterlockedCompareExchange (&thread->interruption_requested, FALSE, TRUE)) {
/* this will consume pending APC calls */
WaitForSingleObjectEx (GetCurrentThread(), 0, TRUE);
InterlockedDecrement (&thread_interruption_requested);
#ifndef HOST_WIN32
/* Clear the interrupted flag of the thread so it can wait again */
wapi_clear_interruption ();
#endif
}
if ((thread->state & ThreadState_AbortRequested) != 0) {
LeaveCriticalSection (thread->synch_cs);
if (thread->abort_exc == NULL) {
/*
* This might be racy, but it has to be called outside the lock
* since it calls managed code.
*/
MONO_OBJECT_SETREF (thread, abort_exc, mono_get_exception_thread_abort ());
}
return thread->abort_exc;
}
else if ((thread->state & ThreadState_SuspendRequested) != 0) {
thread->state &= ~ThreadState_SuspendRequested;
thread->state |= ThreadState_Suspended;
thread->suspend_event = CreateEvent (NULL, TRUE, FALSE, NULL);
if (thread->suspend_event == NULL) {
LeaveCriticalSection (thread->synch_cs);
return(NULL);
}
if (thread->suspended_event)
SetEvent (thread->suspended_event);
LeaveCriticalSection (thread->synch_cs);
if (shutting_down) {
/* After we left the lock, the runtime might shut down so everything becomes invalid */
for (;;)
Sleep (1000);
}
WaitForSingleObject (thread->suspend_event, INFINITE);
EnterCriticalSection (thread->synch_cs);
CloseHandle (thread->suspend_event);
thread->suspend_event = NULL;
thread->state &= ~ThreadState_Suspended;
/* The thread that requested the resume will have replaced this event
* and will be waiting for it
*/
SetEvent (thread->resume_event);
LeaveCriticalSection (thread->synch_cs);
return NULL;
}
else if ((thread->state & ThreadState_StopRequested) != 0) {
/* FIXME: do this through the JIT? */
LeaveCriticalSection (thread->synch_cs);
mono_thread_exit ();
return NULL;
} else if (thread->thread_interrupt_requested) {
thread->thread_interrupt_requested = FALSE;
LeaveCriticalSection (thread->synch_cs);
return(mono_get_exception_thread_interrupted ());
}
LeaveCriticalSection (thread->synch_cs);
return NULL;
}
/*
* mono_thread_request_interruption
*
* A signal handler can call this method to request the interruption of a
* thread. The result of the interruption will depend on the current state of
 * the thread. If the result is an exception that needs to be thrown, it is
* provided as return value.
*/
MonoException*
mono_thread_request_interruption (gboolean running_managed)
{
MonoInternalThread *thread = mono_thread_internal_current ();
/* The thread may already be stopping */
if (thread == NULL)
return NULL;
#ifdef HOST_WIN32
if (thread->interrupt_on_stop &&
thread->state & ThreadState_StopRequested &&
thread->state & ThreadState_Background)
ExitThread (1);
#endif
if (InterlockedCompareExchange (&thread->interruption_requested, 1, 0) == 1)
return NULL;
if (!running_managed || is_running_protected_wrapper ()) {
/* Can't stop while in unmanaged code. Increase the global interruption
request count. When exiting the unmanaged method the count will be
checked and the thread will be interrupted. */
InterlockedIncrement (&thread_interruption_requested);
if (mono_thread_notify_pending_exc_fn && !running_managed)
/* The JIT will notify the thread about the interruption */
/* This shouldn't take any locks */
mono_thread_notify_pending_exc_fn ();
/* this will awake the thread if it is in WaitForSingleObject
or similar */
/* Our implementation of this function ignores the func argument */
QueueUserAPC ((PAPCFUNC)dummy_apc, thread->handle, NULL);
return NULL;
}
else {
return mono_thread_execute_interruption (thread);
}
}
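/*
 * Illustrative caller of the kind the comment above describes (compiled
 * out; real callers are signal handlers and the checkpoint functions
 * below): any returned exception must be raised by the caller.
 */
#if 0
static void
example_interrupt_current_thread (gboolean running_managed)
{
	MonoException *exc = mono_thread_request_interruption (running_managed);
	if (exc)
		mono_raise_exception (exc);
}
#endif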
/*This function should be called by a thread after it has exited all of
* its handle blocks at interruption time.*/
MonoException*
mono_thread_resume_interruption (void)
{
MonoInternalThread *thread = mono_thread_internal_current ();
gboolean still_aborting;
/* The thread may already be stopping */
if (thread == NULL)
return NULL;
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
still_aborting = (thread->state & ThreadState_AbortRequested) != 0;
LeaveCriticalSection (thread->synch_cs);
/*This can happen if the protected block called Thread::ResetAbort*/
if (!still_aborting)
		return NULL;
if (InterlockedCompareExchange (&thread->interruption_requested, 1, 0) == 1)
return NULL;
InterlockedIncrement (&thread_interruption_requested);
#ifndef HOST_WIN32
wapi_self_interrupt ();
#endif
return mono_thread_execute_interruption (thread);
}
gboolean mono_thread_interruption_requested ()
{
if (thread_interruption_requested) {
MonoInternalThread *thread = mono_thread_internal_current ();
/* The thread may already be stopping */
if (thread != NULL)
return (thread->interruption_requested);
}
return FALSE;
}
static void mono_thread_interruption_checkpoint_request (gboolean bypass_abort_protection)
{
MonoInternalThread *thread = mono_thread_internal_current ();
/* The thread may already be stopping */
if (thread == NULL)
return;
mono_debugger_check_interruption ();
if (thread->interruption_requested && (bypass_abort_protection || !is_running_protected_wrapper ())) {
MonoException* exc = mono_thread_execute_interruption (thread);
if (exc) mono_raise_exception (exc);
}
}
/*
* Performs the interruption of the current thread, if one has been requested,
* and the thread is not running a protected wrapper.
*/
void mono_thread_interruption_checkpoint ()
{
mono_thread_interruption_checkpoint_request (FALSE);
}
/*
* Performs the interruption of the current thread, if one has been requested.
*/
void mono_thread_force_interruption_checkpoint ()
{
mono_thread_interruption_checkpoint_request (TRUE);
}
/*
* mono_thread_get_and_clear_pending_exception:
*
* Return any pending exceptions for the current thread and clear it as a side effect.
*/
MonoException*
mono_thread_get_and_clear_pending_exception (void)
{
MonoInternalThread *thread = mono_thread_internal_current ();
/* The thread may already be stopping */
if (thread == NULL)
return NULL;
if (thread->interruption_requested && !is_running_protected_wrapper ()) {
return mono_thread_execute_interruption (thread);
}
if (thread->pending_exception) {
MonoException *exc = thread->pending_exception;
thread->pending_exception = NULL;
return exc;
}
return NULL;
}
/*
* mono_set_pending_exception:
*
* Set the pending exception of the current thread to EXC. On platforms which
* support it, the exception will be thrown when execution returns to managed code.
* On other platforms, this function is equivalent to mono_raise_exception ().
* Internal calls which report exceptions using this function instead of
* raise_exception () might be called by JITted code using a more efficient calling
* convention.
*/
void
mono_set_pending_exception (MonoException *exc)
{
MonoInternalThread *thread = mono_thread_internal_current ();
/* The thread may already be stopping */
if (thread == NULL)
return;
if (mono_thread_notify_pending_exc_fn) {
MONO_OBJECT_SETREF (thread, pending_exception, exc);
mono_thread_notify_pending_exc_fn ();
} else {
/* No way to notify the JIT about the exception, have to throw it now */
mono_raise_exception (exc);
}
}
/**
* mono_thread_interruption_request_flag:
*
* Returns the address of a flag that will be non-zero if an interruption has
* been requested for a thread. The thread to interrupt may not be the current
* thread, so an additional call to mono_thread_interruption_requested() or
 * mono_thread_interruption_checkpoint() is always needed if the flag is not
* zero.
*/
gint32* mono_thread_interruption_request_flag ()
{
return &thread_interruption_requested;
}
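/*
 * Illustrative polling sequence of the kind the comment above describes
 * (compiled out; in practice equivalent checks are emitted by the JIT):
 */
#if 0
static void
example_poll_interruption (void)
{
	gint32 *flag = mono_thread_interruption_request_flag ();
	if (*flag)
		mono_thread_interruption_checkpoint ();
}
#endif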
void
mono_thread_init_apartment_state (void)
{
#ifdef HOST_WIN32
MonoInternalThread* thread = mono_thread_internal_current ();
/* Positive return value indicates success, either
* S_OK if this is first CoInitialize call, or
* S_FALSE if CoInitialize already called, but with same
* threading model. A negative value indicates failure,
* probably due to trying to change the threading model.
*/
if (CoInitializeEx(NULL, (thread->apartment_state == ThreadApartmentState_STA)
? COINIT_APARTMENTTHREADED
: COINIT_MULTITHREADED) < 0) {
thread->apartment_state = ThreadApartmentState_Unknown;
}
#endif
}
void
mono_thread_cleanup_apartment_state (void)
{
#ifdef HOST_WIN32
MonoInternalThread* thread = mono_thread_internal_current ();
if (thread && thread->apartment_state != ThreadApartmentState_Unknown) {
CoUninitialize ();
}
#endif
}
void
mono_thread_set_state (MonoInternalThread *thread, MonoThreadState state)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
thread->state |= state;
LeaveCriticalSection (thread->synch_cs);
}
void
mono_thread_clr_state (MonoInternalThread *thread, MonoThreadState state)
{
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
thread->state &= ~state;
LeaveCriticalSection (thread->synch_cs);
}
gboolean
mono_thread_test_state (MonoInternalThread *thread, MonoThreadState test)
{
gboolean ret = FALSE;
ensure_synch_cs_set (thread);
EnterCriticalSection (thread->synch_cs);
if ((thread->state & test) != 0) {
ret = TRUE;
}
LeaveCriticalSection (thread->synch_cs);
return ret;
}
static MonoClassField *execution_context_field;
static MonoObject**
get_execution_context_addr (void)
{
MonoDomain *domain = mono_domain_get ();
guint32 offset;
if (!execution_context_field) {
execution_context_field = mono_class_get_field_from_name (mono_defaults.thread_class,
"_ec");
g_assert (execution_context_field);
}
g_assert (mono_class_try_get_vtable (domain, mono_defaults.appdomain_class));
mono_domain_lock (domain);
offset = GPOINTER_TO_UINT (g_hash_table_lookup (domain->special_static_fields, execution_context_field));
mono_domain_unlock (domain);
g_assert (offset);
return (MonoObject**) mono_get_special_static_data (offset);
}
MonoObject*
mono_thread_get_execution_context (void)
{
return *get_execution_context_addr ();
}
void
mono_thread_set_execution_context (MonoObject *ec)
{
*get_execution_context_addr () = ec;
}
static gboolean has_tls_get = FALSE;
void
mono_runtime_set_has_tls_get (gboolean val)
{
has_tls_get = val;
}
gboolean
mono_runtime_has_tls_get (void)
{
return has_tls_get;
}
int
mono_thread_kill (MonoInternalThread *thread, int signal)
{
#ifdef HOST_WIN32
/* Win32 uses QueueUserAPC and callers of this are guarded */
g_assert_not_reached ();
#else
# ifdef PTHREAD_POINTER_ID
return pthread_kill ((gpointer)(gsize)(thread->tid), mono_thread_get_abort_signal ());
# else
# ifdef PLATFORM_ANDROID
if (thread->android_tid != 0) {
int ret;
int old_errno = errno;
ret = tkill ((pid_t) thread->android_tid, signal);
if (ret < 0) {
ret = errno;
errno = old_errno;
}
return ret;
}
else
return pthread_kill (thread->tid, mono_thread_get_abort_signal ());
# else
return pthread_kill (thread->tid, mono_thread_get_abort_signal ());
# endif
# endif
#endif
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3434_0 |
crossvul-cpp_data_bad_2172_0 | /* SCTP kernel implementation
* Copyright (c) 1999-2000 Cisco, Inc.
* Copyright (c) 1999-2001 Motorola, Inc.
* Copyright (c) 2002 International Business Machines, Corp.
*
* This file is part of the SCTP kernel implementation
*
* These functions are the methods for accessing the SCTP inqueue.
*
* An SCTP inqueue is a queue into which you push SCTP packets
* (which might be bundles or fragments of chunks) and out of which you
* pop SCTP whole chunks.
*
* This SCTP implementation is free software;
* you can redistribute it and/or modify it under the terms of
* the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This SCTP implementation is distributed in the hope that it
* will be useful, but WITHOUT ANY WARRANTY; without even the implied
* ************************
* warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with GNU CC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
* Please send any bug reports or fixes you make to the
* email address(es):
* lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
/* Initialize an SCTP inqueue. */
void sctp_inq_init(struct sctp_inq *queue)
{
INIT_LIST_HEAD(&queue->in_chunk_list);
queue->in_progress = NULL;
/* Create a task for delivering data. */
INIT_WORK(&queue->immediate, NULL);
}
/* Release the memory associated with an SCTP inqueue. */
void sctp_inq_free(struct sctp_inq *queue)
{
struct sctp_chunk *chunk, *tmp;
/* Empty the queue. */
list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {
list_del_init(&chunk->list);
sctp_chunk_free(chunk);
}
/* If there is a packet which is currently being worked on,
* free it as well.
*/
if (queue->in_progress) {
sctp_chunk_free(queue->in_progress);
queue->in_progress = NULL;
}
}
/* Put a new packet in an SCTP inqueue.
* We assume that packet->sctp_hdr is set and in host byte order.
*/
void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *chunk)
{
/* Directly call the packet handling routine. */
if (chunk->rcvr->dead) {
sctp_chunk_free(chunk);
return;
}
/* We are now calling this either from the soft interrupt
* or from the backlog processing.
* Eventually, we should clean up inqueue to not rely
* on the BH related data structures.
*/
list_add_tail(&chunk->list, &q->in_chunk_list);
if (chunk->asoc)
chunk->asoc->stats.ipackets++;
q->immediate.func(&q->immediate);
}
/* Peek at the next chunk on the inqueue. */
struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)
{
struct sctp_chunk *chunk;
sctp_chunkhdr_t *ch = NULL;
	chunk = queue->in_progress;
	if (chunk == NULL)
		return NULL;

	/* If there are no more chunks in this packet, say so */
	if (chunk->singleton ||
	    chunk->end_of_packet ||
	    chunk->pdiscard)
		return NULL;
ch = (sctp_chunkhdr_t *)chunk->chunk_end;
return ch;
}
/* Extract a chunk from an SCTP inqueue.
*
* WARNING: If you need to put the chunk on another queue, you need to
* make a shallow copy (clone) of it.
*/
struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
{
struct sctp_chunk *chunk;
sctp_chunkhdr_t *ch = NULL;
/* The assumption is that we are safe to process the chunks
* at this time.
*/
if ((chunk = queue->in_progress)) {
/* There is a packet that we have been working on.
* Any post processing work to do before we move on?
*/
if (chunk->singleton ||
chunk->end_of_packet ||
chunk->pdiscard) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
} else {
/* Nothing to do. Next chunk in the packet, please. */
ch = (sctp_chunkhdr_t *) chunk->chunk_end;
/* Force chunk->skb->data to chunk->chunk_end. */
skb_pull(chunk->skb,
chunk->chunk_end - chunk->skb->data);
/* Verify that we have at least chunk headers
* worth of buffer left.
*/
if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
}
}
}
/* Do we need to take the next packet out of the queue to process? */
if (!chunk) {
struct list_head *entry;
/* Is the queue empty? */
if (list_empty(&queue->in_chunk_list))
return NULL;
entry = queue->in_chunk_list.next;
chunk = queue->in_progress =
list_entry(entry, struct sctp_chunk, list);
list_del_init(entry);
/* This is the first chunk in the packet. */
chunk->singleton = 1;
ch = (sctp_chunkhdr_t *) chunk->skb->data;
chunk->data_accepted = 0;
}
chunk->chunk_hdr = ch;
chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
/* In the unlikely case of an IP reassembly, the skb could be
* non-linear. If so, update chunk_end so that it doesn't go past
* the skb->tail.
*/
if (unlikely(skb_is_nonlinear(chunk->skb))) {
if (chunk->chunk_end > skb_tail_pointer(chunk->skb))
chunk->chunk_end = skb_tail_pointer(chunk->skb);
}
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
/* RFC 2960, Section 6.10 Bundling
*
* Partial chunks MUST NOT be placed in an SCTP packet.
* If the receiver detects a partial chunk, it MUST drop
* the chunk.
*
* Since the end of the chunk is past the end of our buffer
	 * (which contains the whole packet), we can freely discard
* the whole packet.
*/
sctp_chunk_free(chunk);
chunk = queue->in_progress = NULL;
return NULL;
} else {
/* We are at the end of the packet, so mark the chunk
* in case we need to send a SACK.
*/
chunk->end_of_packet = 1;
}
pr_debug("+++sctp_inq_pop+++ chunk:%p[%s], length:%d, skb->len:%d\n",
chunk, sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
ntohs(chunk->chunk_hdr->length), chunk->skb->len);
return chunk;
}
/* Set a top-half handler.
*
 * Originally, the top-half handler was scheduled as a BH. We now
* call the handler directly in sctp_inq_push() at a time that
* we know we are lock safe.
* The intent is that this routine will pull stuff out of the
* inqueue and process it.
*/
void sctp_inq_set_th_handler(struct sctp_inq *q, work_func_t callback)
{
INIT_WORK(&q->immediate, callback);
}
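/*
 * Illustrative consumer loop (compiled out, not kernel code): packets go
 * in via sctp_inq_push() and whole chunks come out via sctp_inq_pop().
 * The function below is a hypothetical handler, not part of this file.
 */
#if 0
static void example_inq_consumer(struct sctp_inq *q)
{
	struct sctp_chunk *chunk;

	while ((chunk = sctp_inq_pop(q)) != NULL) {
		/* ... hand the whole chunk to the SCTP state machine ... */
	}
}
#endif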
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2172_0 |
crossvul-cpp_data_good_5006_0 | /*
* NET3 IP device support routines.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Derived from the IP parts of dev.c 1.0.19
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Mark Evans, <evansmp@uhura.aston.ac.uk>
*
* Additional Authors:
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Changes:
* Alexey Kuznetsov: pa_* fields are replaced with ifaddr
* lists.
* Cyrus Durgin: updated for kmod
* Matthias Andree: in devinet_ioctl, compare label and
* address (4.4BSD alias style support),
* fall back to comparing just the label
* if no match found.
*/
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_addr.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/slab.h>
#include <linux/hash.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/kmod.h>
#include <linux/netconf.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/addrconf.h>
#include "fib_lookup.h"
static struct ipv4_devconf ipv4_devconf = {
.data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
static struct ipv4_devconf ipv4_devconf_dflt = {
.data = {
[IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
[IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
[IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
#define IPV4_DEVCONF_DFLT(net, attr) \
IPV4_DEVCONF((*net->ipv4.devconf_dflt), attr)
static const struct nla_policy ifa_ipv4_policy[IFA_MAX+1] = {
[IFA_LOCAL] = { .type = NLA_U32 },
[IFA_ADDRESS] = { .type = NLA_U32 },
[IFA_BROADCAST] = { .type = NLA_U32 },
[IFA_LABEL] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 },
[IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
[IFA_FLAGS] = { .type = NLA_U32 },
};
#define IN4_ADDR_HSIZE_SHIFT 8
#define IN4_ADDR_HSIZE (1U << IN4_ADDR_HSIZE_SHIFT)
static struct hlist_head inet_addr_lst[IN4_ADDR_HSIZE];
static u32 inet_addr_hash(const struct net *net, __be32 addr)
{
u32 val = (__force u32) addr ^ net_hash_mix(net);
return hash_32(val, IN4_ADDR_HSIZE_SHIFT);
}
static void inet_hash_insert(struct net *net, struct in_ifaddr *ifa)
{
u32 hash = inet_addr_hash(net, ifa->ifa_local);
ASSERT_RTNL();
hlist_add_head_rcu(&ifa->hash, &inet_addr_lst[hash]);
}
static void inet_hash_remove(struct in_ifaddr *ifa)
{
ASSERT_RTNL();
hlist_del_init_rcu(&ifa->hash);
}
/**
* __ip_dev_find - find the first device with a given source address.
* @net: the net namespace
* @addr: the source address
* @devref: if true, take a reference on the found device
*
* If a caller uses devref=false, it should be protected by RCU, or RTNL
*/
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
{
u32 hash = inet_addr_hash(net, addr);
struct net_device *result = NULL;
struct in_ifaddr *ifa;
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[hash], hash) {
if (ifa->ifa_local == addr) {
struct net_device *dev = ifa->ifa_dev->dev;
if (!net_eq(dev_net(dev), net))
continue;
result = dev;
break;
}
}
if (!result) {
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res = { 0 };
struct fib_table *local;
/* Fallback to FIB local table so that communication
* over loopback subnets work.
*/
local = fib_get_table(net, RT_TABLE_LOCAL);
if (local &&
!fib_table_lookup(local, &fl4, &res, FIB_LOOKUP_NOREF) &&
res.type == RTN_LOCAL)
result = FIB_RES_DEV(res);
}
if (result && devref)
dev_hold(result);
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL(__ip_dev_find);
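/*
 * Illustrative caller (compiled out, hypothetical): with devref=true the
 * caller owns a reference and must dev_put() it; with devref=false the
 * caller must hold RCU or RTNL, as the comment above states.
 */
#if 0
static void example_ip_dev_find_usage(struct net *net, __be32 addr)
{
	struct net_device *dev = __ip_dev_find(net, addr, true);

	if (dev) {
		/* ... use dev ... */
		dev_put(dev);
	}
}
#endif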
static void rtmsg_ifa(int event, struct in_ifaddr *, struct nlmsghdr *, u32);
static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy);
#ifdef CONFIG_SYSCTL
static int devinet_sysctl_register(struct in_device *idev);
static void devinet_sysctl_unregister(struct in_device *idev);
#else
static int devinet_sysctl_register(struct in_device *idev)
{
return 0;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
}
#endif
/* Locks all the inet devices. */
static struct in_ifaddr *inet_alloc_ifa(void)
{
return kzalloc(sizeof(struct in_ifaddr), GFP_KERNEL);
}
static void inet_rcu_free_ifa(struct rcu_head *head)
{
struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
if (ifa->ifa_dev)
in_dev_put(ifa->ifa_dev);
kfree(ifa);
}
static void inet_free_ifa(struct in_ifaddr *ifa)
{
call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
}
void in_dev_finish_destroy(struct in_device *idev)
{
struct net_device *dev = idev->dev;
WARN_ON(idev->ifa_list);
WARN_ON(idev->mc_list);
kfree(rcu_dereference_protected(idev->mc_hash, 1));
#ifdef NET_REFCNT_DEBUG
pr_debug("%s: %p=%s\n", __func__, idev, dev ? dev->name : "NIL");
#endif
dev_put(dev);
if (!idev->dead)
pr_err("Freeing alive in_device %p\n", idev);
else
kfree(idev);
}
EXPORT_SYMBOL(in_dev_finish_destroy);
static struct in_device *inetdev_init(struct net_device *dev)
{
struct in_device *in_dev;
int err = -ENOMEM;
ASSERT_RTNL();
in_dev = kzalloc(sizeof(*in_dev), GFP_KERNEL);
if (!in_dev)
goto out;
memcpy(&in_dev->cnf, dev_net(dev)->ipv4.devconf_dflt,
sizeof(in_dev->cnf));
in_dev->cnf.sysctl = NULL;
in_dev->dev = dev;
in_dev->arp_parms = neigh_parms_alloc(dev, &arp_tbl);
if (!in_dev->arp_parms)
goto out_kfree;
if (IPV4_DEVCONF(in_dev->cnf, FORWARDING))
dev_disable_lro(dev);
/* Reference in_dev->dev */
dev_hold(dev);
/* Account for reference dev->ip_ptr (below) */
in_dev_hold(in_dev);
err = devinet_sysctl_register(in_dev);
if (err) {
in_dev->dead = 1;
in_dev_put(in_dev);
in_dev = NULL;
goto out;
}
ip_mc_init_dev(in_dev);
if (dev->flags & IFF_UP)
ip_mc_up(in_dev);
/* we can receive as soon as ip_ptr is set -- do this last */
rcu_assign_pointer(dev->ip_ptr, in_dev);
out:
return in_dev ?: ERR_PTR(err);
out_kfree:
kfree(in_dev);
in_dev = NULL;
goto out;
}
static void in_dev_rcu_put(struct rcu_head *head)
{
struct in_device *idev = container_of(head, struct in_device, rcu_head);
in_dev_put(idev);
}
static void inetdev_destroy(struct in_device *in_dev)
{
struct in_ifaddr *ifa;
struct net_device *dev;
ASSERT_RTNL();
dev = in_dev->dev;
in_dev->dead = 1;
ip_mc_destroy_dev(in_dev);
while ((ifa = in_dev->ifa_list) != NULL) {
inet_del_ifa(in_dev, &in_dev->ifa_list, 0);
inet_free_ifa(ifa);
}
RCU_INIT_POINTER(dev->ip_ptr, NULL);
devinet_sysctl_unregister(in_dev);
neigh_parms_release(&arp_tbl, in_dev->arp_parms);
arp_ifdown(dev);
call_rcu(&in_dev->rcu_head, in_dev_rcu_put);
}
int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b)
{
rcu_read_lock();
for_primary_ifa(in_dev) {
if (inet_ifa_match(a, ifa)) {
if (!b || inet_ifa_match(b, ifa)) {
rcu_read_unlock();
return 1;
}
}
} endfor_ifa(in_dev);
rcu_read_unlock();
return 0;
}
static void __inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy, struct nlmsghdr *nlh, u32 portid)
{
struct in_ifaddr *promote = NULL;
struct in_ifaddr *ifa, *ifa1 = *ifap;
struct in_ifaddr *last_prim = in_dev->ifa_list;
struct in_ifaddr *prev_prom = NULL;
int do_promote = IN_DEV_PROMOTE_SECONDARIES(in_dev);
ASSERT_RTNL();
if (in_dev->dead)
goto no_promotions;
/* 1. Deleting a primary ifaddr forces deletion of all secondaries,
 * unless alias promotion is set.
 */
if (!(ifa1->ifa_flags & IFA_F_SECONDARY)) {
struct in_ifaddr **ifap1 = &ifa1->ifa_next;
while ((ifa = *ifap1) != NULL) {
if (!(ifa->ifa_flags & IFA_F_SECONDARY) &&
ifa1->ifa_scope <= ifa->ifa_scope)
last_prim = ifa;
if (!(ifa->ifa_flags & IFA_F_SECONDARY) ||
ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa)) {
ifap1 = &ifa->ifa_next;
prev_prom = ifa;
continue;
}
if (!do_promote) {
inet_hash_remove(ifa);
*ifap1 = ifa->ifa_next;
rtmsg_ifa(RTM_DELADDR, ifa, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_DOWN, ifa);
inet_free_ifa(ifa);
} else {
promote = ifa;
break;
}
}
}
/* On promotion all secondaries from the subnet change their primary IP;
 * we must remove all their routes silently and later add them back
 * with the new prefsrc. Do this while all addresses are still on the
 * device list.
 */
for (ifa = promote; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa))
fib_del_ifaddr(ifa, ifa1);
}
no_promotions:
/* 2. Unlink it */
*ifap = ifa1->ifa_next;
inet_hash_remove(ifa1);
/* 3. Announce address deletion */
/* Send the message first, then call the notifier.
   At first sight, the FIB update triggered by the notifier
   will refer to an already deleted ifaddr, which could confuse
   netlink listeners. It is not true: look, gated sees
   that the route was deleted and, if it still thinks the ifaddr
   is valid, it will try to restore the deleted routes... Grr.
   So this order is correct.
 */
rtmsg_ifa(RTM_DELADDR, ifa1, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_DOWN, ifa1);
if (promote) {
struct in_ifaddr *next_sec = promote->ifa_next;
if (prev_prom) {
prev_prom->ifa_next = promote->ifa_next;
promote->ifa_next = last_prim->ifa_next;
last_prim->ifa_next = promote;
}
promote->ifa_flags &= ~IFA_F_SECONDARY;
rtmsg_ifa(RTM_NEWADDR, promote, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain,
NETDEV_UP, promote);
for (ifa = next_sec; ifa; ifa = ifa->ifa_next) {
if (ifa1->ifa_mask != ifa->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, ifa))
continue;
fib_add_ifaddr(ifa);
}
}
if (destroy)
inet_free_ifa(ifa1);
}
static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
int destroy)
{
__inet_del_ifa(in_dev, ifap, destroy, NULL, 0);
}
static void check_lifetime(struct work_struct *work);
static DECLARE_DELAYED_WORK(check_lifetime_work, check_lifetime);
static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid)
{
struct in_device *in_dev = ifa->ifa_dev;
struct in_ifaddr *ifa1, **ifap, **last_primary;
ASSERT_RTNL();
if (!ifa->ifa_local) {
inet_free_ifa(ifa);
return 0;
}
ifa->ifa_flags &= ~IFA_F_SECONDARY;
last_primary = &in_dev->ifa_list;
for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
ifap = &ifa1->ifa_next) {
if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
ifa->ifa_scope <= ifa1->ifa_scope)
last_primary = &ifa1->ifa_next;
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa)) {
if (ifa1->ifa_local == ifa->ifa_local) {
inet_free_ifa(ifa);
return -EEXIST;
}
if (ifa1->ifa_scope != ifa->ifa_scope) {
inet_free_ifa(ifa);
return -EINVAL;
}
ifa->ifa_flags |= IFA_F_SECONDARY;
}
}
if (!(ifa->ifa_flags & IFA_F_SECONDARY)) {
prandom_seed((__force u32) ifa->ifa_local);
ifap = last_primary;
}
ifa->ifa_next = *ifap;
*ifap = ifa;
inet_hash_insert(dev_net(in_dev->dev), ifa);
cancel_delayed_work(&check_lifetime_work);
queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
/* Send the message first, then call the notifier.
   The notifier will trigger a FIB update, so netlink
   listeners will learn about the new ifaddr. */
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, portid);
blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
return 0;
}
static int inet_insert_ifa(struct in_ifaddr *ifa)
{
return __inet_insert_ifa(ifa, NULL, 0);
}
static int inet_set_ifa(struct net_device *dev, struct in_ifaddr *ifa)
{
struct in_device *in_dev = __in_dev_get_rtnl(dev);
ASSERT_RTNL();
if (!in_dev) {
inet_free_ifa(ifa);
return -ENOBUFS;
}
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
if (ifa->ifa_dev != in_dev) {
WARN_ON(ifa->ifa_dev);
in_dev_hold(in_dev);
ifa->ifa_dev = in_dev;
}
if (ipv4_is_loopback(ifa->ifa_local))
ifa->ifa_scope = RT_SCOPE_HOST;
return inet_insert_ifa(ifa);
}
/* Caller must hold RCU or RTNL :
 * We don't take a reference on the found in_device
 */
struct in_device *inetdev_by_index(struct net *net, int ifindex)
{
struct net_device *dev;
struct in_device *in_dev = NULL;
rcu_read_lock();
dev = dev_get_by_index_rcu(net, ifindex);
if (dev)
in_dev = rcu_dereference_rtnl(dev->ip_ptr);
rcu_read_unlock();
return in_dev;
}
EXPORT_SYMBOL(inetdev_by_index);
/* Called only from RTNL semaphored context. No locks. */
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
__be32 mask)
{
ASSERT_RTNL();
for_primary_ifa(in_dev) {
if (ifa->ifa_mask == mask && inet_ifa_match(prefix, ifa))
return ifa;
} endfor_ifa(in_dev);
return NULL;
}
static int ip_mc_config(struct sock *sk, bool join, const struct in_ifaddr *ifa)
{
struct ip_mreqn mreq = {
.imr_multiaddr.s_addr = ifa->ifa_address,
.imr_ifindex = ifa->ifa_dev->dev->ifindex,
};
int ret;
ASSERT_RTNL();
lock_sock(sk);
if (join)
ret = ip_mc_join_group(sk, &mreq);
else
ret = ip_mc_leave_group(sk, &mreq);
release_sock(sk);
return ret;
}
static int inet_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct nlattr *tb[IFA_MAX+1];
struct in_device *in_dev;
struct ifaddrmsg *ifm;
struct in_ifaddr *ifa, **ifap;
int err = -EINVAL;
ASSERT_RTNL();
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
if (err < 0)
goto errout;
ifm = nlmsg_data(nlh);
in_dev = inetdev_by_index(net, ifm->ifa_index);
if (!in_dev) {
err = -ENODEV;
goto errout;
}
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
if (tb[IFA_LOCAL] &&
ifa->ifa_local != nla_get_in_addr(tb[IFA_LOCAL]))
continue;
if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
continue;
if (tb[IFA_ADDRESS] &&
(ifm->ifa_prefixlen != ifa->ifa_prefixlen ||
!inet_ifa_match(nla_get_in_addr(tb[IFA_ADDRESS]), ifa)))
continue;
if (ipv4_is_multicast(ifa->ifa_address))
ip_mc_config(net->ipv4.mc_autojoin_sk, false, ifa);
__inet_del_ifa(in_dev, ifap, 1, nlh, NETLINK_CB(skb).portid);
return 0;
}
err = -EADDRNOTAVAIL;
errout:
return err;
}
#define INFINITY_LIFE_TIME 0xFFFFFFFF
static void check_lifetime(struct work_struct *work)
{
unsigned long now, next, next_sec, next_sched;
struct in_ifaddr *ifa;
struct hlist_node *n;
int i;
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
for (i = 0; i < IN4_ADDR_HSIZE; i++) {
bool change_needed = false;
rcu_read_lock();
hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {
unsigned long age;
if (ifa->ifa_flags & IFA_F_PERMANENT)
continue;
/* We try to batch several events at once. */
age = (now - ifa->ifa_tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
change_needed = true;
} else if (ifa->ifa_preferred_lft ==
INFINITY_LIFE_TIME) {
continue;
} else if (age >= ifa->ifa_preferred_lft) {
if (time_before(ifa->ifa_tstamp +
ifa->ifa_valid_lft * HZ, next))
next = ifa->ifa_tstamp +
ifa->ifa_valid_lft * HZ;
if (!(ifa->ifa_flags & IFA_F_DEPRECATED))
change_needed = true;
} else if (time_before(ifa->ifa_tstamp +
ifa->ifa_preferred_lft * HZ,
next)) {
next = ifa->ifa_tstamp +
ifa->ifa_preferred_lft * HZ;
}
}
rcu_read_unlock();
if (!change_needed)
continue;
rtnl_lock();
hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) {
unsigned long age;
if (ifa->ifa_flags & IFA_F_PERMANENT)
continue;
/* We try to batch several events at once. */
age = (now - ifa->ifa_tstamp +
ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&
age >= ifa->ifa_valid_lft) {
struct in_ifaddr **ifap;
for (ifap = &ifa->ifa_dev->ifa_list;
*ifap != NULL; ifap = &(*ifap)->ifa_next) {
if (*ifap == ifa) {
inet_del_ifa(ifa->ifa_dev,
ifap, 1);
break;
}
}
} else if (ifa->ifa_preferred_lft !=
INFINITY_LIFE_TIME &&
age >= ifa->ifa_preferred_lft &&
!(ifa->ifa_flags & IFA_F_DEPRECATED)) {
ifa->ifa_flags |= IFA_F_DEPRECATED;
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
}
rtnl_unlock();
}
next_sec = round_jiffies_up(next);
next_sched = next;
/* If rounded timeout is accurate enough, accept it. */
if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
next_sched = next_sec;
now = jiffies;
/* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
if (time_before(next_sched, now + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = now + ADDRCONF_TIMER_FUZZ_MAX;
queue_delayed_work(system_power_efficient_wq, &check_lifetime_work,
next_sched - now);
}
static void set_ifa_lifetime(struct in_ifaddr *ifa, __u32 valid_lft,
__u32 prefered_lft)
{
unsigned long timeout;
ifa->ifa_flags &= ~(IFA_F_PERMANENT | IFA_F_DEPRECATED);
timeout = addrconf_timeout_fixup(valid_lft, HZ);
if (addrconf_finite_timeout(timeout))
ifa->ifa_valid_lft = timeout;
else
ifa->ifa_flags |= IFA_F_PERMANENT;
timeout = addrconf_timeout_fixup(prefered_lft, HZ);
if (addrconf_finite_timeout(timeout)) {
if (timeout == 0)
ifa->ifa_flags |= IFA_F_DEPRECATED;
ifa->ifa_preferred_lft = timeout;
}
ifa->ifa_tstamp = jiffies;
if (!ifa->ifa_cstamp)
ifa->ifa_cstamp = ifa->ifa_tstamp;
}
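/* Lifetime sketch (hypothetical caller): passing INFINITY_LIFE_TIME for
 * both lifetimes makes the address permanent (IFA_F_PERMANENT is set in
 * set_ifa_lifetime() above), while a finite preferred lifetime of zero
 * deprecates the address immediately.
 */
static void __maybe_unused example_make_permanent(struct in_ifaddr *ifa)
{
	set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
}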
static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
__u32 *pvalid_lft, __u32 *pprefered_lft)
{
struct nlattr *tb[IFA_MAX+1];
struct in_ifaddr *ifa;
struct ifaddrmsg *ifm;
struct net_device *dev;
struct in_device *in_dev;
int err;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv4_policy);
if (err < 0)
goto errout;
ifm = nlmsg_data(nlh);
err = -EINVAL;
if (ifm->ifa_prefixlen > 32 || !tb[IFA_LOCAL])
goto errout;
dev = __dev_get_by_index(net, ifm->ifa_index);
err = -ENODEV;
if (!dev)
goto errout;
in_dev = __in_dev_get_rtnl(dev);
err = -ENOBUFS;
if (!in_dev)
goto errout;
ifa = inet_alloc_ifa();
if (!ifa)
/*
 * A potential in_dev allocation can be left alive; it stays
 * assigned to its device and is destroyed with it.
 */
goto errout;
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
in_dev_hold(in_dev);
if (!tb[IFA_ADDRESS])
tb[IFA_ADDRESS] = tb[IFA_LOCAL];
INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_prefixlen = ifm->ifa_prefixlen;
ifa->ifa_mask = inet_make_mask(ifm->ifa_prefixlen);
ifa->ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) :
ifm->ifa_flags;
ifa->ifa_scope = ifm->ifa_scope;
ifa->ifa_dev = in_dev;
ifa->ifa_local = nla_get_in_addr(tb[IFA_LOCAL]);
ifa->ifa_address = nla_get_in_addr(tb[IFA_ADDRESS]);
if (tb[IFA_BROADCAST])
ifa->ifa_broadcast = nla_get_in_addr(tb[IFA_BROADCAST]);
if (tb[IFA_LABEL])
nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
if (tb[IFA_CACHEINFO]) {
struct ifa_cacheinfo *ci;
ci = nla_data(tb[IFA_CACHEINFO]);
if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
err = -EINVAL;
goto errout_free;
}
*pvalid_lft = ci->ifa_valid;
*pprefered_lft = ci->ifa_prefered;
}
return ifa;
errout_free:
inet_free_ifa(ifa);
errout:
return ERR_PTR(err);
}
static struct in_ifaddr *find_matching_ifa(struct in_ifaddr *ifa)
{
struct in_device *in_dev = ifa->ifa_dev;
struct in_ifaddr *ifa1, **ifap;
if (!ifa->ifa_local)
return NULL;
for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
ifap = &ifa1->ifa_next) {
if (ifa1->ifa_mask == ifa->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, ifa) &&
ifa1->ifa_local == ifa->ifa_local)
return ifa1;
}
return NULL;
}
static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct in_ifaddr *ifa;
struct in_ifaddr *ifa_existing;
__u32 valid_lft = INFINITY_LIFE_TIME;
__u32 prefered_lft = INFINITY_LIFE_TIME;
ASSERT_RTNL();
ifa = rtm_to_ifaddr(net, nlh, &valid_lft, &prefered_lft);
if (IS_ERR(ifa))
return PTR_ERR(ifa);
ifa_existing = find_matching_ifa(ifa);
if (!ifa_existing) {
/* It would be best to check for !NLM_F_CREATE here but
* userspace already relies on not having to provide this.
*/
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
if (ifa->ifa_flags & IFA_F_MCAUTOJOIN) {
int ret = ip_mc_config(net->ipv4.mc_autojoin_sk,
true, ifa);
if (ret < 0) {
inet_free_ifa(ifa);
return ret;
}
}
return __inet_insert_ifa(ifa, nlh, NETLINK_CB(skb).portid);
} else {
inet_free_ifa(ifa);
if (nlh->nlmsg_flags & NLM_F_EXCL ||
!(nlh->nlmsg_flags & NLM_F_REPLACE))
return -EEXIST;
ifa = ifa_existing;
set_ifa_lifetime(ifa, valid_lft, prefered_lft);
cancel_delayed_work(&check_lifetime_work);
queue_delayed_work(system_power_efficient_wq,
&check_lifetime_work, 0);
rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
}
return 0;
}
/*
* Determine a default network mask, based on the IP address.
*/
static int inet_abc_len(__be32 addr)
{
int rc = -1; /* Something else, probably a multicast. */
if (ipv4_is_zeronet(addr))
rc = 0;
else {
__u32 haddr = ntohl(addr);
if (IN_CLASSA(haddr))
rc = 8;
else if (IN_CLASSB(haddr))
rc = 16;
else if (IN_CLASSC(haddr))
rc = 24;
}
return rc;
}
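/* Worked example (sketch, not part of the original file): the classful
 * defaults derived by inet_abc_len() for a few well-known addresses.
 */
static void __maybe_unused example_default_masks(void)
{
	WARN_ON(inet_abc_len(htonl(0x0A000001)) != 8);	/* 10.0.0.1    class A */
	WARN_ON(inet_abc_len(htonl(0xAC100001)) != 16);	/* 172.16.0.1  class B */
	WARN_ON(inet_abc_len(htonl(0xC0A80101)) != 24);	/* 192.168.1.1 class C */
	WARN_ON(inet_abc_len(htonl(0xE0000001)) != -1);	/* 224.0.0.1 multicast */
}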
int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct ifreq ifr;
struct sockaddr_in sin_orig;
struct sockaddr_in *sin = (struct sockaddr_in *)&ifr.ifr_addr;
struct in_device *in_dev;
struct in_ifaddr **ifap = NULL;
struct in_ifaddr *ifa = NULL;
struct net_device *dev;
char *colon;
int ret = -EFAULT;
int tryaddrmatch = 0;
/*
* Fetch the caller's info block into kernel space
*/
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
goto out;
ifr.ifr_name[IFNAMSIZ - 1] = 0;
/* save original address for comparison */
memcpy(&sin_orig, sin, sizeof(*sin));
colon = strchr(ifr.ifr_name, ':');
if (colon)
*colon = 0;
dev_load(net, ifr.ifr_name);
switch (cmd) {
case SIOCGIFADDR: /* Get interface address */
case SIOCGIFBRDADDR: /* Get the broadcast address */
case SIOCGIFDSTADDR: /* Get the destination address */
case SIOCGIFNETMASK: /* Get the netmask for the interface */
/* Note that these ioctls will not sleep,
   so we do not impose a lock.
   One day we will be forced to put a shared lock here (I mean SMP)
 */
tryaddrmatch = (sin_orig.sin_family == AF_INET);
memset(sin, 0, sizeof(*sin));
sin->sin_family = AF_INET;
break;
case SIOCSIFFLAGS:
ret = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto out;
break;
case SIOCSIFADDR: /* Set interface address (and family) */
case SIOCSIFBRDADDR: /* Set the broadcast address */
case SIOCSIFDSTADDR: /* Set the destination address */
case SIOCSIFNETMASK: /* Set the netmask for the interface */
ret = -EPERM;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
goto out;
ret = -EINVAL;
if (sin->sin_family != AF_INET)
goto out;
break;
default:
ret = -EINVAL;
goto out;
}
rtnl_lock();
ret = -ENODEV;
dev = __dev_get_by_name(net, ifr.ifr_name);
if (!dev)
goto done;
if (colon)
*colon = ':';
in_dev = __in_dev_get_rtnl(dev);
if (in_dev) {
if (tryaddrmatch) {
/* Matthias Andree */
/* compare label and address (4.4BSD style) */
/* note: we only do this for a limited set of ioctls
and only if the original address family was AF_INET.
This is checked above. */
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
ifa->ifa_local) {
break; /* found */
}
}
}
/* we didn't get a match, maybe the application is
4.3BSD-style and passed in junk so we fall back to
comparing just the label */
if (!ifa) {
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next)
if (!strcmp(ifr.ifr_name, ifa->ifa_label))
break;
}
}
ret = -EADDRNOTAVAIL;
if (!ifa && cmd != SIOCSIFADDR && cmd != SIOCSIFFLAGS)
goto done;
switch (cmd) {
case SIOCGIFADDR: /* Get interface address */
sin->sin_addr.s_addr = ifa->ifa_local;
goto rarok;
case SIOCGIFBRDADDR: /* Get the broadcast address */
sin->sin_addr.s_addr = ifa->ifa_broadcast;
goto rarok;
case SIOCGIFDSTADDR: /* Get the destination address */
sin->sin_addr.s_addr = ifa->ifa_address;
goto rarok;
case SIOCGIFNETMASK: /* Get the netmask for the interface */
sin->sin_addr.s_addr = ifa->ifa_mask;
goto rarok;
case SIOCSIFFLAGS:
if (colon) {
ret = -EADDRNOTAVAIL;
if (!ifa)
break;
ret = 0;
if (!(ifr.ifr_flags & IFF_UP))
inet_del_ifa(in_dev, ifap, 1);
break;
}
ret = dev_change_flags(dev, ifr.ifr_flags);
break;
case SIOCSIFADDR: /* Set interface address (and family) */
ret = -EINVAL;
if (inet_abc_len(sin->sin_addr.s_addr) < 0)
break;
if (!ifa) {
ret = -ENOBUFS;
ifa = inet_alloc_ifa();
if (!ifa)
break;
INIT_HLIST_NODE(&ifa->hash);
if (colon)
memcpy(ifa->ifa_label, ifr.ifr_name, IFNAMSIZ);
else
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
} else {
ret = 0;
if (ifa->ifa_local == sin->sin_addr.s_addr)
break;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = 0;
ifa->ifa_scope = 0;
}
ifa->ifa_address = ifa->ifa_local = sin->sin_addr.s_addr;
if (!(dev->flags & IFF_POINTOPOINT)) {
ifa->ifa_prefixlen = inet_abc_len(ifa->ifa_address);
ifa->ifa_mask = inet_make_mask(ifa->ifa_prefixlen);
if ((dev->flags & IFF_BROADCAST) &&
ifa->ifa_prefixlen < 31)
ifa->ifa_broadcast = ifa->ifa_address |
~ifa->ifa_mask;
} else {
ifa->ifa_prefixlen = 32;
ifa->ifa_mask = inet_make_mask(32);
}
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
ret = inet_set_ifa(dev, ifa);
break;
case SIOCSIFBRDADDR: /* Set the broadcast address */
ret = 0;
if (ifa->ifa_broadcast != sin->sin_addr.s_addr) {
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_broadcast = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
}
break;
case SIOCSIFDSTADDR: /* Set the destination address */
ret = 0;
if (ifa->ifa_address == sin->sin_addr.s_addr)
break;
ret = -EINVAL;
if (inet_abc_len(sin->sin_addr.s_addr) < 0)
break;
ret = 0;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_address = sin->sin_addr.s_addr;
inet_insert_ifa(ifa);
break;
case SIOCSIFNETMASK: /* Set the netmask for the interface */
/*
* The mask we set must be legal.
*/
ret = -EINVAL;
if (bad_mask(sin->sin_addr.s_addr, 0))
break;
ret = 0;
if (ifa->ifa_mask != sin->sin_addr.s_addr) {
__be32 old_mask = ifa->ifa_mask;
inet_del_ifa(in_dev, ifap, 0);
ifa->ifa_mask = sin->sin_addr.s_addr;
ifa->ifa_prefixlen = inet_mask_len(ifa->ifa_mask);
/* See if the current broadcast address matches
 * the old netmask; if so, recalculate the
 * broadcast address. Otherwise it's a
 * funny address, so don't touch it since
 * the user seems to know what (s)he's doing...
 */
if ((dev->flags & IFF_BROADCAST) &&
(ifa->ifa_prefixlen < 31) &&
(ifa->ifa_broadcast ==
(ifa->ifa_local|~old_mask))) {
ifa->ifa_broadcast = (ifa->ifa_local |
~sin->sin_addr.s_addr);
}
inet_insert_ifa(ifa);
}
break;
}
done:
rtnl_unlock();
out:
return ret;
rarok:
rtnl_unlock();
ret = copy_to_user(arg, &ifr, sizeof(struct ifreq)) ? -EFAULT : 0;
goto out;
}
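/* Userspace usage sketch (hypothetical program, kept entirely in this
 * comment since it does not belong in kernel code): querying an
 * interface address through the ioctl path above.
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	if (fd >= 0 && ioctl(fd, SIOCGIFADDR, &ifr) == 0) {
 *		struct sockaddr_in *sin =
 *			(struct sockaddr_in *)&ifr.ifr_addr;
 *		printf("%s\n", inet_ntoa(sin->sin_addr));
 *	}
 */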
static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
{
struct in_device *in_dev = __in_dev_get_rtnl(dev);
struct in_ifaddr *ifa;
struct ifreq ifr;
int done = 0;
if (!in_dev)
goto out;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
if (!buf) {
done += sizeof(ifr);
continue;
}
if (len < (int) sizeof(ifr))
break;
memset(&ifr, 0, sizeof(struct ifreq));
strcpy(ifr.ifr_name, ifa->ifa_label);
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
ifa->ifa_local;
if (copy_to_user(buf, &ifr, sizeof(struct ifreq))) {
done = -EFAULT;
break;
}
buf += sizeof(struct ifreq);
len -= sizeof(struct ifreq);
done += sizeof(struct ifreq);
}
out:
return done;
}
__be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope)
{
__be32 addr = 0;
struct in_device *in_dev;
struct net *net = dev_net(dev);
int master_idx;
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto no_in_dev;
for_primary_ifa(in_dev) {
if (ifa->ifa_scope > scope)
continue;
if (!dst || inet_ifa_match(dst, ifa)) {
addr = ifa->ifa_local;
break;
}
if (!addr)
addr = ifa->ifa_local;
} endfor_ifa(in_dev);
if (addr)
goto out_unlock;
no_in_dev:
master_idx = l3mdev_master_ifindex_rcu(dev);
/* For VRFs, the VRF device takes the place of the loopback device,
* with addresses on it being preferred. Note in such cases the
* loopback device will be among the devices that fail the master_idx
* equality check in the loop below.
*/
if (master_idx &&
(dev = dev_get_by_index_rcu(net, master_idx)) &&
(in_dev = __in_dev_get_rcu(dev))) {
for_primary_ifa(in_dev) {
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope) {
addr = ifa->ifa_local;
goto out_unlock;
}
} endfor_ifa(in_dev);
}
/* Non-loopback addresses on the loopback device should be preferred
   in this case. It is important that lo is the first interface
   in the dev_base list.
 */
for_each_netdev_rcu(net, dev) {
if (l3mdev_master_ifindex_rcu(dev) != master_idx)
continue;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
continue;
for_primary_ifa(in_dev) {
if (ifa->ifa_scope != RT_SCOPE_LINK &&
ifa->ifa_scope <= scope) {
addr = ifa->ifa_local;
goto out_unlock;
}
} endfor_ifa(in_dev);
}
out_unlock:
rcu_read_unlock();
return addr;
}
EXPORT_SYMBOL(inet_select_addr);
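/* Usage sketch (hypothetical caller): pick a universe-scoped source
 * address on @dev for an unknown destination. inet_select_addr() takes
 * rcu_read_lock() itself; the caller only has to keep @dev alive.
 */
static __be32 __maybe_unused example_pick_src(const struct net_device *dev)
{
	return inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
}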
static __be32 confirm_addr_indev(struct in_device *in_dev, __be32 dst,
__be32 local, int scope)
{
int same = 0;
__be32 addr = 0;
for_ifa(in_dev) {
if (!addr &&
(local == ifa->ifa_local || !local) &&
ifa->ifa_scope <= scope) {
addr = ifa->ifa_local;
if (same)
break;
}
if (!same) {
same = (!local || inet_ifa_match(local, ifa)) &&
(!dst || inet_ifa_match(dst, ifa));
if (same && addr) {
if (local || !dst)
break;
/* Is the selected addr in the dst subnet? */
if (inet_ifa_match(addr, ifa))
break;
/* No, then can we use new local src? */
if (ifa->ifa_scope <= scope) {
addr = ifa->ifa_local;
break;
}
/* keep searching for a dst subnet that contains addr */
same = 0;
}
}
} endfor_ifa(in_dev);
return same ? addr : 0;
}
/*
* Confirm that local IP address exists using wildcards:
* - net: netns to check, cannot be NULL
* - in_dev: only on this interface, NULL=any interface
* - dst: only in the same subnet as dst, 0=any dst
* - local: address, 0=autoselect the local address
* - scope: maximum allowed scope value for the local address
*/
__be32 inet_confirm_addr(struct net *net, struct in_device *in_dev,
__be32 dst, __be32 local, int scope)
{
__be32 addr = 0;
struct net_device *dev;
if (in_dev)
return confirm_addr_indev(in_dev, dst, local, scope);
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
addr = confirm_addr_indev(in_dev, dst, local, scope);
if (addr)
break;
}
}
rcu_read_unlock();
return addr;
}
EXPORT_SYMBOL(inet_confirm_addr);
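/* Usage sketch (hypothetical caller): passing a NULL in_dev makes
 * inet_confirm_addr() scan every interface in the namespace.
 */
static bool __maybe_unused example_addr_exists(struct net *net, __be32 local)
{
	return inet_confirm_addr(net, NULL, 0, local, RT_SCOPE_LINK) != 0;
}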
/*
* Device notifier
*/
int register_inetaddr_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(register_inetaddr_notifier);
int unregister_inetaddr_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_unregister(&inetaddr_chain, nb);
}
EXPORT_SYMBOL(unregister_inetaddr_notifier);
/* Rename ifa_labels for a device name change. Make some effort to preserve
* existing alias numbering and to create unique labels if possible.
*/
static void inetdev_changename(struct net_device *dev, struct in_device *in_dev)
{
struct in_ifaddr *ifa;
int named = 0;
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
char old[IFNAMSIZ], *dot;
memcpy(old, ifa->ifa_label, IFNAMSIZ);
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
if (named++ == 0)
goto skip;
dot = strchr(old, ':');
if (!dot) {
sprintf(old, ":%d", named);
dot = old;
}
if (strlen(dot) + strlen(dev->name) < IFNAMSIZ)
strcat(ifa->ifa_label, dot);
else
strcpy(ifa->ifa_label + (IFNAMSIZ - strlen(dot) - 1), dot);
skip:
rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0);
}
}
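/* RFC 791 requires every host to accept datagrams of at least 68 octets
 * (60 bytes of maximum header plus an 8 byte fragment), hence the floor
 * used below.
 */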
static bool inetdev_valid_mtu(unsigned int mtu)
{
return mtu >= 68;
}
static void inetdev_send_gratuitous_arp(struct net_device *dev,
struct in_device *in_dev)
{
struct in_ifaddr *ifa;
for (ifa = in_dev->ifa_list; ifa;
ifa = ifa->ifa_next) {
arp_send(ARPOP_REQUEST, ETH_P_ARP,
ifa->ifa_local, dev,
ifa->ifa_local, NULL,
dev->dev_addr, NULL);
}
}
/* Called only under RTNL semaphore */
static int inetdev_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct in_device *in_dev = __in_dev_get_rtnl(dev);
ASSERT_RTNL();
if (!in_dev) {
if (event == NETDEV_REGISTER) {
in_dev = inetdev_init(dev);
if (IS_ERR(in_dev))
return notifier_from_errno(PTR_ERR(in_dev));
if (dev->flags & IFF_LOOPBACK) {
IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
}
} else if (event == NETDEV_CHANGEMTU) {
/* Re-enabling IP */
if (inetdev_valid_mtu(dev->mtu))
in_dev = inetdev_init(dev);
}
goto out;
}
switch (event) {
case NETDEV_REGISTER:
pr_debug("%s: bug\n", __func__);
RCU_INIT_POINTER(dev->ip_ptr, NULL);
break;
case NETDEV_UP:
if (!inetdev_valid_mtu(dev->mtu))
break;
if (dev->flags & IFF_LOOPBACK) {
struct in_ifaddr *ifa = inet_alloc_ifa();
if (ifa) {
INIT_HLIST_NODE(&ifa->hash);
ifa->ifa_local =
ifa->ifa_address = htonl(INADDR_LOOPBACK);
ifa->ifa_prefixlen = 8;
ifa->ifa_mask = inet_make_mask(8);
in_dev_hold(in_dev);
ifa->ifa_dev = in_dev;
ifa->ifa_scope = RT_SCOPE_HOST;
memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
set_ifa_lifetime(ifa, INFINITY_LIFE_TIME,
INFINITY_LIFE_TIME);
ipv4_devconf_setall(in_dev);
neigh_parms_data_state_setall(in_dev->arp_parms);
inet_insert_ifa(ifa);
}
}
ip_mc_up(in_dev);
/* fall through */
case NETDEV_CHANGEADDR:
if (!IN_DEV_ARP_NOTIFY(in_dev))
break;
/* fall through */
case NETDEV_NOTIFY_PEERS:
/* Send gratuitous ARP to notify of link change */
inetdev_send_gratuitous_arp(dev, in_dev);
break;
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
case NETDEV_PRE_TYPE_CHANGE:
ip_mc_unmap(in_dev);
break;
case NETDEV_POST_TYPE_CHANGE:
ip_mc_remap(in_dev);
break;
case NETDEV_CHANGEMTU:
if (inetdev_valid_mtu(dev->mtu))
break;
/* disable IP when MTU is not enough */
case NETDEV_UNREGISTER:
inetdev_destroy(in_dev);
break;
case NETDEV_CHANGENAME:
/* Do not notify about label change, this event is
* not interesting to applications using netlink.
*/
inetdev_changename(dev, in_dev);
devinet_sysctl_unregister(in_dev);
devinet_sysctl_register(in_dev);
break;
}
out:
return NOTIFY_DONE;
}
static struct notifier_block ip_netdev_notifier = {
.notifier_call = inetdev_event,
};
static size_t inet_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
+ nla_total_size(4) /* IFA_ADDRESS */
+ nla_total_size(4) /* IFA_LOCAL */
+ nla_total_size(4) /* IFA_BROADCAST */
+ nla_total_size(IFNAMSIZ) /* IFA_LABEL */
+ nla_total_size(4) /* IFA_FLAGS */
+ nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}
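/* Convert a jiffies timestamp (relative to INITIAL_JIFFIES) into
 * hundredths of a second, the unit userspace expects in ifa_cacheinfo.
 */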
static inline u32 cstamp_delta(unsigned long cstamp)
{
return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}
static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
unsigned long tstamp, u32 preferred, u32 valid)
{
struct ifa_cacheinfo ci;
ci.cstamp = cstamp_delta(cstamp);
ci.tstamp = cstamp_delta(tstamp);
ci.ifa_prefered = preferred;
ci.ifa_valid = valid;
return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
}
static int inet_fill_ifaddr(struct sk_buff *skb, struct in_ifaddr *ifa,
u32 portid, u32 seq, int event, unsigned int flags)
{
struct ifaddrmsg *ifm;
struct nlmsghdr *nlh;
u32 preferred, valid;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), flags);
if (!nlh)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifa_family = AF_INET;
ifm->ifa_prefixlen = ifa->ifa_prefixlen;
ifm->ifa_flags = ifa->ifa_flags;
ifm->ifa_scope = ifa->ifa_scope;
ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
preferred = ifa->ifa_preferred_lft;
valid = ifa->ifa_valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
long tval = (jiffies - ifa->ifa_tstamp) / HZ;
if (preferred > tval)
preferred -= tval;
else
preferred = 0;
if (valid != INFINITY_LIFE_TIME) {
if (valid > tval)
valid -= tval;
else
valid = 0;
}
}
} else {
preferred = INFINITY_LIFE_TIME;
valid = INFINITY_LIFE_TIME;
}
if ((ifa->ifa_address &&
nla_put_in_addr(skb, IFA_ADDRESS, ifa->ifa_address)) ||
(ifa->ifa_local &&
nla_put_in_addr(skb, IFA_LOCAL, ifa->ifa_local)) ||
(ifa->ifa_broadcast &&
nla_put_in_addr(skb, IFA_BROADCAST, ifa->ifa_broadcast)) ||
(ifa->ifa_label[0] &&
nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
nla_put_u32(skb, IFA_FLAGS, ifa->ifa_flags) ||
put_cacheinfo(skb, ifa->ifa_cstamp, ifa->ifa_tstamp,
preferred, valid))
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx, s_idx;
int ip_idx, s_ip_idx;
struct net_device *dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
struct hlist_head *head;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
s_ip_idx = ip_idx = cb->args[2];
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (h > s_h || idx > s_idx)
s_ip_idx = 0;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto cont;
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
if (inet_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWADDR, NLM_F_MULTI) < 0) {
rcu_read_unlock();
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
}
cont:
idx++;
}
rcu_read_unlock();
}
done:
cb->args[0] = h;
cb->args[1] = idx;
cb->args[2] = ip_idx;
return skb->len;
}
static void rtmsg_ifa(int event, struct in_ifaddr *ifa, struct nlmsghdr *nlh,
u32 portid)
{
struct sk_buff *skb;
u32 seq = nlh ? nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
struct net *net;
net = dev_net(ifa->ifa_dev->dev);
skb = nlmsg_new(inet_nlmsg_size(), GFP_KERNEL);
if (!skb)
goto errout;
err = inet_fill_ifaddr(skb, ifa, portid, seq, event, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, portid, RTNLGRP_IPV4_IFADDR, nlh, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
}
static size_t inet_get_link_af_size(const struct net_device *dev,
u32 ext_filter_mask)
{
struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
if (!in_dev)
return 0;
return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
}
static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
u32 ext_filter_mask)
{
struct in_device *in_dev = rcu_dereference_rtnl(dev->ip_ptr);
struct nlattr *nla;
int i;
if (!in_dev)
return -ENODATA;
nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
if (!nla)
return -EMSGSIZE;
for (i = 0; i < IPV4_DEVCONF_MAX; i++)
((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
return 0;
}
static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
[IFLA_INET_CONF] = { .type = NLA_NESTED },
};
static int inet_validate_link_af(const struct net_device *dev,
const struct nlattr *nla)
{
struct nlattr *a, *tb[IFLA_INET_MAX+1];
int err, rem;
if (dev && !__in_dev_get_rtnl(dev))
return -EAFNOSUPPORT;
err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
if (err < 0)
return err;
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
int cfgid = nla_type(a);
if (nla_len(a) < 4)
return -EINVAL;
if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
return -EINVAL;
}
}
return 0;
}
static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
struct in_device *in_dev = __in_dev_get_rtnl(dev);
struct nlattr *a, *tb[IFLA_INET_MAX+1];
int rem;
if (!in_dev)
return -EAFNOSUPPORT;
if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
BUG();
if (tb[IFLA_INET_CONF]) {
nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
}
return 0;
}
static int inet_netconf_msgsize_devconf(int type)
{
int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
+ nla_total_size(4); /* NETCONFA_IFINDEX */
bool all = false;
if (type == NETCONFA_ALL)
all = true;
if (all || type == NETCONFA_FORWARDING)
size += nla_total_size(4);
if (all || type == NETCONFA_RP_FILTER)
size += nla_total_size(4);
if (all || type == NETCONFA_MC_FORWARDING)
size += nla_total_size(4);
if (all || type == NETCONFA_PROXY_NEIGH)
size += nla_total_size(4);
if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
size += nla_total_size(4);
return size;
}
static int inet_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
struct ipv4_devconf *devconf, u32 portid,
u32 seq, int event, unsigned int flags,
int type)
{
struct nlmsghdr *nlh;
struct netconfmsg *ncm;
bool all = false;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
flags);
if (!nlh)
return -EMSGSIZE;
if (type == NETCONFA_ALL)
all = true;
ncm = nlmsg_data(nlh);
ncm->ncm_family = AF_INET;
if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_FORWARDING) &&
nla_put_s32(skb, NETCONFA_FORWARDING,
IPV4_DEVCONF(*devconf, FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_RP_FILTER) &&
nla_put_s32(skb, NETCONFA_RP_FILTER,
IPV4_DEVCONF(*devconf, RP_FILTER)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_MC_FORWARDING) &&
nla_put_s32(skb, NETCONFA_MC_FORWARDING,
IPV4_DEVCONF(*devconf, MC_FORWARDING)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_PROXY_NEIGH) &&
nla_put_s32(skb, NETCONFA_PROXY_NEIGH,
IPV4_DEVCONF(*devconf, PROXY_ARP)) < 0)
goto nla_put_failure;
if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
IPV4_DEVCONF(*devconf, IGNORE_ROUTES_WITH_LINKDOWN)) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
struct ipv4_devconf *devconf)
{
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(type), GFP_ATOMIC);
if (!skb)
goto errout;
err = inet_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
RTM_NEWNETCONF, 0, type);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_IPV4_NETCONF, NULL, GFP_ATOMIC);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_IPV4_NETCONF, err);
}
static const struct nla_policy devconf_ipv4_policy[NETCONFA_MAX+1] = {
[NETCONFA_IFINDEX] = { .len = sizeof(int) },
[NETCONFA_FORWARDING] = { .len = sizeof(int) },
[NETCONFA_RP_FILTER] = { .len = sizeof(int) },
[NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
[NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
};
static int inet_netconf_get_devconf(struct sk_buff *in_skb,
struct nlmsghdr *nlh)
{
struct net *net = sock_net(in_skb->sk);
struct nlattr *tb[NETCONFA_MAX+1];
struct netconfmsg *ncm;
struct sk_buff *skb;
struct ipv4_devconf *devconf;
struct in_device *in_dev;
struct net_device *dev;
int ifindex;
int err;
err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
devconf_ipv4_policy);
if (err < 0)
goto errout;
err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
switch (ifindex) {
case NETCONFA_IFINDEX_ALL:
devconf = net->ipv4.devconf_all;
break;
case NETCONFA_IFINDEX_DEFAULT:
devconf = net->ipv4.devconf_dflt;
break;
default:
dev = __dev_get_by_index(net, ifindex);
if (!dev)
goto errout;
in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
goto errout;
devconf = &in_dev->cnf;
break;
}
err = -ENOBUFS;
skb = nlmsg_new(inet_netconf_msgsize_devconf(NETCONFA_ALL), GFP_ATOMIC);
if (!skb)
goto errout;
err = inet_netconf_fill_devconf(skb, ifindex, devconf,
NETLINK_CB(in_skb).portid,
nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
NETCONFA_ALL);
if (err < 0) {
/* -EMSGSIZE implies BUG in inet_netconf_msgsize_devconf() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
return err;
}
static int inet_netconf_dump_devconf(struct sk_buff *skb,
struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx, s_idx;
struct net_device *dev;
struct in_device *in_dev;
struct hlist_head *head;
s_h = cb->args[0];
s_idx = idx = cb->args[1];
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
rcu_read_lock();
cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
net->dev_base_seq;
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
goto cont;
if (inet_netconf_fill_devconf(skb, dev->ifindex,
&in_dev->cnf,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF,
NLM_F_MULTI,
NETCONFA_ALL) < 0) {
rcu_read_unlock();
goto done;
}
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
rcu_read_unlock();
}
if (h == NETDEV_HASHENTRIES) {
if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
else
h++;
}
if (h == NETDEV_HASHENTRIES + 1) {
if (inet_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
net->ipv4.devconf_dflt,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
RTM_NEWNETCONF, NLM_F_MULTI,
NETCONFA_ALL) < 0)
goto done;
else
h++;
}
done:
cb->args[0] = h;
cb->args[1] = idx;
return skb->len;
}
#ifdef CONFIG_SYSCTL
static void devinet_copy_dflt_conf(struct net *net, int i)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
struct in_device *in_dev;
in_dev = __in_dev_get_rcu(dev);
if (in_dev && !test_bit(i, in_dev->cnf.state))
in_dev->cnf.data[i] = net->ipv4.devconf_dflt->data[i];
}
rcu_read_unlock();
}
/* called with RTNL locked */
static void inet_forward_change(struct net *net)
{
struct net_device *dev;
int on = IPV4_DEVCONF_ALL(net, FORWARDING);
IPV4_DEVCONF_ALL(net, ACCEPT_REDIRECTS) = !on;
IPV4_DEVCONF_DFLT(net, FORWARDING) = on;
inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
NETCONFA_IFINDEX_ALL,
net->ipv4.devconf_all);
inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
NETCONFA_IFINDEX_DEFAULT,
net->ipv4.devconf_dflt);
for_each_netdev(net, dev) {
struct in_device *in_dev;
if (on)
dev_disable_lro(dev);
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev) {
IN_DEV_CONF_SET(in_dev, FORWARDING, on);
inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
dev->ifindex, &in_dev->cnf);
}
rcu_read_unlock();
}
}
static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
{
if (cnf == net->ipv4.devconf_dflt)
return NETCONFA_IFINDEX_DEFAULT;
else if (cnf == net->ipv4.devconf_all)
return NETCONFA_IFINDEX_ALL;
else {
struct in_device *idev
= container_of(cnf, struct in_device, cnf);
return idev->dev->ifindex;
}
}
static int devinet_conf_proc(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
int new_value = *(int *)ctl->data;
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
struct net *net = ctl->extra2;
int i = (int *)ctl->data - cnf->data;
int ifindex;
set_bit(i, cnf->state);
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 ||
i == IPV4_DEVCONF_ROUTE_LOCALNET - 1)
if ((new_value == 0) && (old_value != 0))
rt_cache_flush(net);
if (i == IPV4_DEVCONF_RP_FILTER - 1 &&
new_value != old_value) {
ifindex = devinet_conf_ifindex(net, cnf);
inet_netconf_notify_devconf(net, NETCONFA_RP_FILTER,
ifindex, cnf);
}
if (i == IPV4_DEVCONF_PROXY_ARP - 1 &&
new_value != old_value) {
ifindex = devinet_conf_ifindex(net, cnf);
inet_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
ifindex, cnf);
}
if (i == IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN - 1 &&
new_value != old_value) {
ifindex = devinet_conf_ifindex(net, cnf);
inet_netconf_notify_devconf(net, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
ifindex, cnf);
}
}
return ret;
}
static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
loff_t pos = *ppos;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
if (write && *valp != val) {
struct net *net = ctl->extra2;
if (valp != &IPV4_DEVCONF_DFLT(net, FORWARDING)) {
if (!rtnl_trylock()) {
/* Restore the original values before restarting */
*valp = val;
*ppos = pos;
return restart_syscall();
}
if (valp == &IPV4_DEVCONF_ALL(net, FORWARDING)) {
inet_forward_change(net);
} else {
struct ipv4_devconf *cnf = ctl->extra1;
struct in_device *idev =
container_of(cnf, struct in_device, cnf);
if (*valp)
dev_disable_lro(idev->dev);
inet_netconf_notify_devconf(net,
NETCONFA_FORWARDING,
idev->dev->ifindex,
cnf);
}
rtnl_unlock();
rt_cache_flush(net);
} else
inet_netconf_notify_devconf(net, NETCONFA_FORWARDING,
NETCONFA_IFINDEX_DEFAULT,
net->ipv4.devconf_dflt);
}
return ret;
}
static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
int *valp = ctl->data;
int val = *valp;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
struct net *net = ctl->extra2;
if (write && *valp != val)
rt_cache_flush(net);
return ret;
}
#define DEVINET_SYSCTL_ENTRY(attr, name, mval, proc) \
{ \
.procname = name, \
.data = ipv4_devconf.data + \
IPV4_DEVCONF_ ## attr - 1, \
.maxlen = sizeof(int), \
.mode = mval, \
.proc_handler = proc, \
.extra1 = &ipv4_devconf, \
}
#define DEVINET_SYSCTL_RW_ENTRY(attr, name) \
DEVINET_SYSCTL_ENTRY(attr, name, 0644, devinet_conf_proc)
#define DEVINET_SYSCTL_RO_ENTRY(attr, name) \
DEVINET_SYSCTL_ENTRY(attr, name, 0444, devinet_conf_proc)
#define DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, proc) \
DEVINET_SYSCTL_ENTRY(attr, name, 0644, proc)
#define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
static struct devinet_sysctl_table {
struct ctl_table_header *sysctl_header;
struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
} devinet_sysctl = {
.devinet_vars = {
DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
devinet_sysctl_forward),
DEVINET_SYSCTL_RO_ENTRY(MC_FORWARDING, "mc_forwarding"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_REDIRECTS, "accept_redirects"),
DEVINET_SYSCTL_RW_ENTRY(SECURE_REDIRECTS, "secure_redirects"),
DEVINET_SYSCTL_RW_ENTRY(SHARED_MEDIA, "shared_media"),
DEVINET_SYSCTL_RW_ENTRY(RP_FILTER, "rp_filter"),
DEVINET_SYSCTL_RW_ENTRY(SEND_REDIRECTS, "send_redirects"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE,
"accept_source_route"),
DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"),
DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"),
DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"),
DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
DEVINET_SYSCTL_RW_ENTRY(LOG_MARTIANS, "log_martians"),
DEVINET_SYSCTL_RW_ENTRY(TAG, "tag"),
DEVINET_SYSCTL_RW_ENTRY(ARPFILTER, "arp_filter"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ANNOUNCE, "arp_announce"),
DEVINET_SYSCTL_RW_ENTRY(ARP_IGNORE, "arp_ignore"),
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
"force_igmp_version"),
DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
"igmpv2_unsolicited_report_interval"),
DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
"igmpv3_unsolicited_report_interval"),
DEVINET_SYSCTL_RW_ENTRY(IGNORE_ROUTES_WITH_LINKDOWN,
"ignore_routes_with_linkdown"),
DEVINET_SYSCTL_RW_ENTRY(DROP_GRATUITOUS_ARP,
"drop_gratuitous_arp"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
"route_localnet"),
DEVINET_SYSCTL_FLUSHING_ENTRY(DROP_UNICAST_IN_L2_MULTICAST,
"drop_unicast_in_l2_multicast"),
},
};
static int __devinet_sysctl_register(struct net *net, char *dev_name,
struct ipv4_devconf *p)
{
int i;
struct devinet_sysctl_table *t;
char path[sizeof("net/ipv4/conf/") + IFNAMSIZ];
t = kmemdup(&devinet_sysctl, sizeof(*t), GFP_KERNEL);
if (!t)
goto out;
for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
t->devinet_vars[i].extra1 = p;
t->devinet_vars[i].extra2 = net;
}
snprintf(path, sizeof(path), "net/ipv4/conf/%s", dev_name);
t->sysctl_header = register_net_sysctl(net, path, t->devinet_vars);
if (!t->sysctl_header)
goto free;
p->sysctl = t;
return 0;
free:
kfree(t);
out:
return -ENOBUFS;
}
static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
{
struct devinet_sysctl_table *t = cnf->sysctl;
if (!t)
return;
cnf->sysctl = NULL;
unregister_net_sysctl_table(t->sysctl_header);
kfree(t);
}
static int devinet_sysctl_register(struct in_device *idev)
{
int err;
if (!sysctl_dev_name_is_allowed(idev->dev->name))
return -EINVAL;
err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
if (err)
return err;
err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
&idev->cnf);
if (err)
neigh_sysctl_unregister(idev->arp_parms);
return err;
}
static void devinet_sysctl_unregister(struct in_device *idev)
{
__devinet_sysctl_unregister(&idev->cnf);
neigh_sysctl_unregister(idev->arp_parms);
}
static struct ctl_table ctl_forward_entry[] = {
{
.procname = "ip_forward",
.data = &ipv4_devconf.data[
IPV4_DEVCONF_FORWARDING - 1],
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = devinet_sysctl_forward,
.extra1 = &ipv4_devconf,
.extra2 = &init_net,
},
{ },
};
#endif
static __net_init int devinet_init_net(struct net *net)
{
int err;
struct ipv4_devconf *all, *dflt;
#ifdef CONFIG_SYSCTL
struct ctl_table *tbl = ctl_forward_entry;
struct ctl_table_header *forw_hdr;
#endif
err = -ENOMEM;
all = &ipv4_devconf;
dflt = &ipv4_devconf_dflt;
if (!net_eq(net, &init_net)) {
all = kmemdup(all, sizeof(ipv4_devconf), GFP_KERNEL);
if (!all)
goto err_alloc_all;
dflt = kmemdup(dflt, sizeof(ipv4_devconf_dflt), GFP_KERNEL);
if (!dflt)
goto err_alloc_dflt;
#ifdef CONFIG_SYSCTL
tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
if (!tbl)
goto err_alloc_ctl;
tbl[0].data = &all->data[IPV4_DEVCONF_FORWARDING - 1];
tbl[0].extra1 = all;
tbl[0].extra2 = net;
#endif
}
#ifdef CONFIG_SYSCTL
err = __devinet_sysctl_register(net, "all", all);
if (err < 0)
goto err_reg_all;
err = __devinet_sysctl_register(net, "default", dflt);
if (err < 0)
goto err_reg_dflt;
err = -ENOMEM;
forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
if (!forw_hdr)
goto err_reg_ctl;
net->ipv4.forw_hdr = forw_hdr;
#endif
net->ipv4.devconf_all = all;
net->ipv4.devconf_dflt = dflt;
return 0;
#ifdef CONFIG_SYSCTL
err_reg_ctl:
__devinet_sysctl_unregister(dflt);
err_reg_dflt:
__devinet_sysctl_unregister(all);
err_reg_all:
if (tbl != ctl_forward_entry)
kfree(tbl);
err_alloc_ctl:
#endif
if (dflt != &ipv4_devconf_dflt)
kfree(dflt);
err_alloc_dflt:
if (all != &ipv4_devconf)
kfree(all);
err_alloc_all:
return err;
}
static __net_exit void devinet_exit_net(struct net *net)
{
#ifdef CONFIG_SYSCTL
struct ctl_table *tbl;
tbl = net->ipv4.forw_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.forw_hdr);
__devinet_sysctl_unregister(net->ipv4.devconf_dflt);
__devinet_sysctl_unregister(net->ipv4.devconf_all);
kfree(tbl);
#endif
kfree(net->ipv4.devconf_dflt);
kfree(net->ipv4.devconf_all);
}
static __net_initdata struct pernet_operations devinet_ops = {
.init = devinet_init_net,
.exit = devinet_exit_net,
};
static struct rtnl_af_ops inet_af_ops __read_mostly = {
.family = AF_INET,
.fill_link_af = inet_fill_link_af,
.get_link_af_size = inet_get_link_af_size,
.validate_link_af = inet_validate_link_af,
.set_link_af = inet_set_link_af,
};
void __init devinet_init(void)
{
int i;
for (i = 0; i < IN4_ADDR_HSIZE; i++)
INIT_HLIST_HEAD(&inet_addr_lst[i]);
register_pernet_subsys(&devinet_ops);
register_gifconf(PF_INET, inet_gifconf);
register_netdevice_notifier(&ip_netdev_notifier);
queue_delayed_work(system_power_efficient_wq, &check_lifetime_work, 0);
rtnl_af_register(&inet_af_ops);
rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL, NULL);
rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL, NULL);
rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr, NULL);
rtnl_register(PF_INET, RTM_GETNETCONF, inet_netconf_get_devconf,
inet_netconf_dump_devconf, NULL);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5006_0 |
crossvul-cpp_data_bad_4966_3 | /*
* NET4: Implementation of BSD Unix domain sockets.
*
* Authors: Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Linus Torvalds : Assorted bug cures.
* Niibe Yutaka : async I/O support.
* Carsten Paeth : PF_UNIX check, address fixes.
* Alan Cox : Limit size of allocated blocks.
* Alan Cox : Fixed the stupid socketpair bug.
* Alan Cox : BSD compatibility fine tuning.
* Alan Cox : Fixed a bug in connect when interrupted.
* Alan Cox : Sorted out a proper draft version of
* file descriptor passing hacked up from
* Mike Shaver's work.
* Marty Leisner : Fixes to fd passing
* Nick Nevin : recvmsg bugfix.
* Alan Cox : Started proper garbage collector
* Heiko EiBfeldt : Missing verify_area check
* Alan Cox : Started POSIXisms
* Andreas Schwab : Replace inode by dentry for proper
* reference counting
* Kirk Petersen : Made this a module
* Christoph Rohland : Elegant non-blocking accept/connect algorithm.
* Lots of bug fixes.
* Alexey Kuznetsov : Repaired (I hope) bugs introduced
* by the above two patches.
* Andrea Arcangeli : If possible we block in connect(2)
* if the max backlog of the listen socket
* has been reached. This won't break
* old apps and it will avoid a huge amount
* of socks hashed (this is for unix_gc()
* performance reasons).
* Security fix that limits the max
* number of socks to 2*max_files and
* the number of skbs queueable in the
* dgram receiver.
* Artur Skawina : Hash function optimizations
* Alexey Kuznetsov : Full scale SMP. Lot of bugs are introduced 8)
* Malcolm Beattie : Set peercred for socketpair
* Michal Ostrowski : Module initialization cleanup.
* Arnaldo C. Melo : Remove MOD_{INC,DEC}_USE_COUNT,
* the core infrastructure is doing that
* for all net proto families now (2.5.69+)
*
*
* Known differences from reference BSD that was tested:
*
* [TO FIX]
* ECONNREFUSED is not returned from one end of a connected() socket to the
* other the moment one end closes.
* fstat() doesn't return st_dev=0, and gives the blksize as high water mark
* and a fake inode identifier (nor the BSD first socket fstat twice bug).
* [NOT TO FIX]
* accept() returns a path name even if the connecting socket has closed
* in the meantime (BSD loses the path and gives up).
* accept() returns 0 length path for an unbound connector. BSD returns 16
* and a null first byte in the path (but not for gethost/peername - BSD bug ??)
* socketpair(...SOCK_RAW..) doesn't panic the kernel.
* BSD af_unix apparently has connect forgetting to block properly.
* (need to check this with the POSIX spec in detail)
*
* Differences from 2.0.0-11-... (ANK)
* Bug fixes and improvements.
* - client shutdown killed server socket.
* - removed all useless cli/sti pairs.
*
* Semantic changes/extensions.
* - generic control message passing.
* - SCM_CREDENTIALS control message.
* - "Abstract" (not FS based) socket bindings.
* Abstract names are sequences of bytes (not zero terminated)
* starting with 0, so that this name space does not intersect
* with BSD names.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/af_unix.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <net/scm.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
#include <linux/freezer.h>
struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
EXPORT_SYMBOL_GPL(unix_socket_table);
DEFINE_SPINLOCK(unix_table_lock);
EXPORT_SYMBOL_GPL(unix_table_lock);
static atomic_long_t unix_nr_socks;
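/* Table layout sketch: the first UNIX_HASH_SIZE chains hold bound
 * sockets (abstract names by name hash, filesystem names by inode
 * number), while the second UNIX_HASH_SIZE chains hold unbound sockets,
 * spread by the address-derived hash computed below.
 */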
static struct hlist_head *unix_sockets_unbound(void *addr)
{
unsigned long hash = (unsigned long)addr;
hash ^= hash >> 16;
hash ^= hash >> 8;
hash %= UNIX_HASH_SIZE;
return &unix_socket_table[UNIX_HASH_SIZE + hash];
}
#define UNIX_ABSTRACT(sk) (unix_sk(sk)->addr->hash < UNIX_HASH_SIZE)
#ifdef CONFIG_SECURITY_NETWORK
static void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
UNIXCB(skb).secid = scm->secid;
}
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{
scm->secid = UNIXCB(skb).secid;
}
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return (scm->secid == UNIXCB(skb).secid);
}
#else
static inline void unix_get_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
{ }
static inline bool unix_secdata_eq(struct scm_cookie *scm, struct sk_buff *skb)
{
return true;
}
#endif /* CONFIG_SECURITY_NETWORK */
/*
* SMP locking strategy:
* hash table is protected with spinlock unix_table_lock
* each socket state is protected by separate spin lock.
*/
static inline unsigned int unix_hash_fold(__wsum n)
{
unsigned int hash = (__force unsigned int)csum_fold(n);
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
}
#define unix_peer(sk) (unix_sk(sk)->peer)
static inline int unix_our_peer(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == sk;
}
static inline int unix_may_send(struct sock *sk, struct sock *osk)
{
return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
}
static inline int unix_recvq_full(struct sock const *sk)
{
return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
}
struct sock *unix_peer_get(struct sock *s)
{
struct sock *peer;
unix_state_lock(s);
peer = unix_peer(s);
if (peer)
sock_hold(peer);
unix_state_unlock(s);
return peer;
}
EXPORT_SYMBOL_GPL(unix_peer_get);
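/* Usage sketch (hypothetical caller): unix_peer_get() returns the peer
 * with a reference held, so the caller must drop it with sock_put().
 */
static void __maybe_unused example_with_peer(struct sock *s)
{
	struct sock *peer = unix_peer_get(s);

	if (peer) {
		/* ... use peer under its own locking rules ... */
		sock_put(peer);
	}
}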
static inline void unix_release_addr(struct unix_address *addr)
{
if (atomic_dec_and_test(&addr->refcnt))
kfree(addr);
}
/*
* Check unix socket name:
* - it should not be zero length.
* - if it does not start with a zero byte, it should be NUL terminated (an FS object)
* - if it starts with a zero byte, it is an abstract name.
*/
static int unix_mkname(struct sockaddr_un *sunaddr, int len, unsigned int *hashp)
{
if (len <= sizeof(short) || len > sizeof(*sunaddr))
return -EINVAL;
if (!sunaddr || sunaddr->sun_family != AF_UNIX)
return -EINVAL;
if (sunaddr->sun_path[0]) {
/*
* This may look like an off by one error but it is a bit more
* subtle. 108 is the longest valid AF_UNIX path for a binding.
		 * sun_path[108] does not strictly exist. However, in kernel space
* we are guaranteed that it is a valid memory location in our
* kernel address buffer.
*/
((char *)sunaddr)[len] = 0;
len = strlen(sunaddr->sun_path)+1+sizeof(short);
return len;
}
*hashp = unix_hash_fold(csum_partial(sunaddr, len, 0));
return len;
}
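/* For illustration only (not part of the original source): the two address
 * flavours accepted above look like this from userspace. Names and sizes
 * are arbitrary; the sketch assumes <sys/socket.h>, <sys/un.h>,
 * <stddef.h>, <string.h> and an already created AF_UNIX socket fd.
 *
 * Filesystem binding:
 *	struct sockaddr_un a;
 *	memset(&a, 0, sizeof(a));
 *	a.sun_family = AF_UNIX;
 *	strcpy(a.sun_path, "/tmp/example.sock");
 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
 *
 * Abstract binding (leading NUL byte, length covers only the used bytes):
 *	a.sun_path[0] = '\0';
 *	memcpy(a.sun_path + 1, "example", 7);
 *	bind(fd, (struct sockaddr *)&a,
 *	     offsetof(struct sockaddr_un, sun_path) + 1 + 7);
 */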
static void __unix_remove_socket(struct sock *sk)
{
sk_del_node_init(sk);
}
static void __unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
WARN_ON(!sk_unhashed(sk));
sk_add_node(sk, list);
}
static inline void unix_remove_socket(struct sock *sk)
{
spin_lock(&unix_table_lock);
__unix_remove_socket(sk);
spin_unlock(&unix_table_lock);
}
static inline void unix_insert_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock(&unix_table_lock);
__unix_insert_socket(list, sk);
spin_unlock(&unix_table_lock);
}
static struct sock *__unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, int type, unsigned int hash)
{
struct sock *s;
sk_for_each(s, &unix_socket_table[hash ^ type]) {
struct unix_sock *u = unix_sk(s);
if (!net_eq(sock_net(s), net))
continue;
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
goto found;
}
s = NULL;
found:
return s;
}
static inline struct sock *unix_find_socket_byname(struct net *net,
struct sockaddr_un *sunname,
int len, int type,
unsigned int hash)
{
struct sock *s;
spin_lock(&unix_table_lock);
s = __unix_find_socket_byname(net, sunname, len, type, hash);
if (s)
sock_hold(s);
spin_unlock(&unix_table_lock);
return s;
}
static struct sock *unix_find_socket_byinode(struct inode *i)
{
struct sock *s;
spin_lock(&unix_table_lock);
sk_for_each(s,
&unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
struct dentry *dentry = unix_sk(s)->path.dentry;
if (dentry && d_backing_inode(dentry) == i) {
sock_hold(s);
goto found;
}
}
s = NULL;
found:
spin_unlock(&unix_table_lock);
return s;
}
/* Support code for asymmetrically connected dgram sockets
*
* If a datagram socket is connected to a socket not itself connected
* to the first socket (eg, /dev/log), clients may only enqueue more
* messages if the present receive queue of the server socket is not
* "too large". This means there's a second writeability condition
* poll and sendmsg need to test. The dgram recv code will do a wake
* up on the peer_wait wait queue of a socket upon reception of a
* datagram which needs to be propagated to sleeping would-be writers
* since these might not have sent anything so far. This can't be
* accomplished via poll_wait because the lifetime of the server
* socket might be less than that of its clients if these break their
* association with it or if the server socket is closed while clients
 * are still connected to it, and there's no way to inform "a polling
 * implementation" that it should let go of a certain wait queue.
*
* In order to propagate a wake up, a wait_queue_t of the client
* socket is enqueued on the peer_wait queue of the server socket
* whose wake function does a wake_up on the ordinary client socket
* wait queue. This connection is established whenever a write (or
 * poll for write) hits the flow control condition and is broken when the
* association to the server socket is dissolved or after a wake up
* was relayed.
*/
static int unix_dgram_peer_wake_relay(wait_queue_t *q, unsigned mode, int flags,
void *key)
{
struct unix_sock *u;
wait_queue_head_t *u_sleep;
u = container_of(q, struct unix_sock, peer_wake);
__remove_wait_queue(&unix_sk(u->peer_wake.private)->peer_wait,
q);
u->peer_wake.private = NULL;
/* relaying can only happen while the wq still exists */
u_sleep = sk_sleep(&u->sk);
if (u_sleep)
wake_up_interruptible_poll(u_sleep, key);
return 0;
}
static int unix_dgram_peer_wake_connect(struct sock *sk, struct sock *other)
{
struct unix_sock *u, *u_other;
int rc;
u = unix_sk(sk);
u_other = unix_sk(other);
rc = 0;
spin_lock(&u_other->peer_wait.lock);
if (!u->peer_wake.private) {
u->peer_wake.private = other;
__add_wait_queue(&u_other->peer_wait, &u->peer_wake);
rc = 1;
}
spin_unlock(&u_other->peer_wait.lock);
return rc;
}
static void unix_dgram_peer_wake_disconnect(struct sock *sk,
struct sock *other)
{
struct unix_sock *u, *u_other;
u = unix_sk(sk);
u_other = unix_sk(other);
spin_lock(&u_other->peer_wait.lock);
if (u->peer_wake.private == other) {
__remove_wait_queue(&u_other->peer_wait, &u->peer_wake);
u->peer_wake.private = NULL;
}
spin_unlock(&u_other->peer_wait.lock);
}
static void unix_dgram_peer_wake_disconnect_wakeup(struct sock *sk,
struct sock *other)
{
unix_dgram_peer_wake_disconnect(sk, other);
wake_up_interruptible_poll(sk_sleep(sk),
POLLOUT |
POLLWRNORM |
POLLWRBAND);
}
/* preconditions:
* - unix_peer(sk) == other
* - association is stable
*/
static int unix_dgram_peer_wake_me(struct sock *sk, struct sock *other)
{
int connected;
connected = unix_dgram_peer_wake_connect(sk, other);
if (unix_recvq_full(other))
return 1;
if (connected)
unix_dgram_peer_wake_disconnect(sk, other);
return 0;
}
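/* Editorial note (not from the original source): a return of 1 means the
 * caller must wait - the wait-queue link is kept so the next dequeue on
 * 'other' relays a wake-up to us; a return of 0 tears the link back down
 * because the receive queue already has room and no relay will be needed.
 */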
static int unix_writable(const struct sock *sk)
{
return sk->sk_state != TCP_LISTEN &&
(atomic_read(&sk->sk_wmem_alloc) << 2) <= sk->sk_sndbuf;
}
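/* Editorial note (not from the original source): the shift implements the
 * usual "writable while the send buffer is under a quarter full" test,
 * i.e. wmem_alloc * 4 <= sk_sndbuf.
 */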
static void unix_write_space(struct sock *sk)
{
struct socket_wq *wq;
rcu_read_lock();
if (unix_writable(sk)) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait,
POLLOUT | POLLWRNORM | POLLWRBAND);
sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
}
rcu_read_unlock();
}
/* When a dgram socket disconnects (or changes its peer), we clear its receive
 * queue of packets that arrived from the previous peer. First, this allows
 * flow control based only on wmem_alloc; second, an sk connected to a peer
 * may receive messages only from that peer. */
static void unix_dgram_disconnected(struct sock *sk, struct sock *other)
{
if (!skb_queue_empty(&sk->sk_receive_queue)) {
skb_queue_purge(&sk->sk_receive_queue);
wake_up_interruptible_all(&unix_sk(sk)->peer_wait);
		/* If one link of a bidirectional dgram pipe is disconnected,
		 * we signal an error. Messages are lost. Do not do this
		 * when the peer was not connected to us.
		 */
if (!sock_flag(other, SOCK_DEAD) && unix_peer(other) == sk) {
other->sk_err = ECONNRESET;
other->sk_error_report(other);
}
}
}
static void unix_sock_destructor(struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
skb_queue_purge(&sk->sk_receive_queue);
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {
pr_info("Attempt to release alive unix socket: %p\n", sk);
return;
}
if (u->addr)
unix_release_addr(u->addr);
atomic_long_dec(&unix_nr_socks);
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
local_bh_enable();
#ifdef UNIX_REFCNT_DEBUG
pr_debug("UNIX %p is destroyed, %ld are still alive.\n", sk,
atomic_long_read(&unix_nr_socks));
#endif
}
static void unix_release_sock(struct sock *sk, int embrion)
{
struct unix_sock *u = unix_sk(sk);
struct path path;
struct sock *skpair;
struct sk_buff *skb;
int state;
unix_remove_socket(sk);
/* Clear state */
unix_state_lock(sk);
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
path = u->path;
u->path.dentry = NULL;
u->path.mnt = NULL;
state = sk->sk_state;
sk->sk_state = TCP_CLOSE;
unix_state_unlock(sk);
wake_up_interruptible_all(&u->peer_wait);
skpair = unix_peer(sk);
if (skpair != NULL) {
if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
unix_state_lock(skpair);
/* No more writes */
skpair->sk_shutdown = SHUTDOWN_MASK;
if (!skb_queue_empty(&sk->sk_receive_queue) || embrion)
skpair->sk_err = ECONNRESET;
unix_state_unlock(skpair);
skpair->sk_state_change(skpair);
sk_wake_async(skpair, SOCK_WAKE_WAITD, POLL_HUP);
}
unix_dgram_peer_wake_disconnect(sk, skpair);
sock_put(skpair); /* It may now die */
unix_peer(sk) = NULL;
}
/* Try to flush out this socket. Throw out buffers at least */
while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
if (state == TCP_LISTEN)
unix_release_sock(skb->sk, 1);
/* passed fds are erased in the kfree_skb hook */
UNIXCB(skb).consumed = skb->len;
kfree_skb(skb);
}
if (path.dentry)
path_put(&path);
sock_put(sk);
/* ---- Socket is dead now and most probably destroyed ---- */
/*
* Fixme: BSD difference: In BSD all sockets connected to us get
* ECONNRESET and we die on the spot. In Linux we behave
* like files and pipes do and wait for the last
* dereference.
*
* Can't we simply set sock->err?
*
	 * What does the above comment talk about? --ANK(980817)
*/
if (unix_tot_inflight)
unix_gc(); /* Garbage collect fds */
}
static void init_peercred(struct sock *sk)
{
put_pid(sk->sk_peer_pid);
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
sk->sk_peer_pid = get_pid(task_tgid(current));
sk->sk_peer_cred = get_current_cred();
}
static void copy_peercred(struct sock *sk, struct sock *peersk)
{
put_pid(sk->sk_peer_pid);
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
sk->sk_peer_pid = get_pid(peersk->sk_peer_pid);
sk->sk_peer_cred = get_cred(peersk->sk_peer_cred);
}
static int unix_listen(struct socket *sock, int backlog)
{
int err;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
struct pid *old_pid = NULL;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out; /* Only stream/seqpacket sockets accept */
err = -EINVAL;
if (!u->addr)
goto out; /* No listens on an unbound socket */
unix_state_lock(sk);
if (sk->sk_state != TCP_CLOSE && sk->sk_state != TCP_LISTEN)
goto out_unlock;
if (backlog > sk->sk_max_ack_backlog)
wake_up_interruptible_all(&u->peer_wait);
sk->sk_max_ack_backlog = backlog;
sk->sk_state = TCP_LISTEN;
/* set credentials so connect can copy them */
init_peercred(sk);
err = 0;
out_unlock:
unix_state_unlock(sk);
put_pid(old_pid);
out:
return err;
}
static int unix_release(struct socket *);
static int unix_bind(struct socket *, struct sockaddr *, int);
static int unix_stream_connect(struct socket *, struct sockaddr *,
int addr_len, int flags);
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int);
static int unix_getname(struct socket *, struct sockaddr *, int *, int);
static unsigned int unix_poll(struct file *, struct socket *, poll_table *);
static unsigned int unix_dgram_poll(struct file *, struct socket *,
poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_stream_recvmsg(struct socket *, struct msghdr *, size_t, int);
static ssize_t unix_stream_sendpage(struct socket *, struct page *, int offset,
size_t size, int flags);
static ssize_t unix_stream_splice_read(struct socket *, loff_t *ppos,
struct pipe_inode_info *, size_t size,
unsigned int flags);
static int unix_dgram_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_dgram_recvmsg(struct socket *, struct msghdr *, size_t, int);
static int unix_dgram_connect(struct socket *, struct sockaddr *,
int, int);
static int unix_seqpacket_sendmsg(struct socket *, struct msghdr *, size_t);
static int unix_seqpacket_recvmsg(struct socket *, struct msghdr *, size_t,
int);
static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
if (mutex_lock_interruptible(&u->readlock))
return -EINTR;
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
return 0;
}
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_stream_sendmsg,
.recvmsg = unix_stream_recvmsg,
.mmap = sock_no_mmap,
.sendpage = unix_stream_sendpage,
.splice_read = unix_stream_splice_read,
.set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_dgram_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_dgram_connect,
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = sock_no_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_dgram_sendmsg,
.recvmsg = unix_dgram_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
.set_peek_off = unix_set_peek_off,
};
static const struct proto_ops unix_seqpacket_ops = {
.family = PF_UNIX,
.owner = THIS_MODULE,
.release = unix_release,
.bind = unix_bind,
.connect = unix_stream_connect,
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
.poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = unix_seqpacket_sendmsg,
.recvmsg = unix_seqpacket_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
.set_peek_off = unix_set_peek_off,
};
static struct proto unix_proto = {
.name = "UNIX",
.owner = THIS_MODULE,
.obj_size = sizeof(struct unix_sock),
};
/*
* AF_UNIX sockets do not interact with hardware, hence they
* dont trigger interrupts - so it's safe for them to have
* bh-unsafe locking for their sk_receive_queue.lock. Split off
* this special lock-class by reinitializing the spinlock key:
*/
static struct lock_class_key af_unix_sk_receive_queue_lock_key;
static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
{
struct sock *sk = NULL;
struct unix_sock *u;
atomic_long_inc(&unix_nr_socks);
if (atomic_long_read(&unix_nr_socks) > 2 * get_max_files())
goto out;
sk = sk_alloc(net, PF_UNIX, GFP_KERNEL, &unix_proto, kern);
if (!sk)
goto out;
sock_init_data(sock, sk);
lockdep_set_class(&sk->sk_receive_queue.lock,
&af_unix_sk_receive_queue_lock_key);
sk->sk_write_space = unix_write_space;
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk);
u->path.dentry = NULL;
u->path.mnt = NULL;
spin_lock_init(&u->lock);
atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link);
mutex_init(&u->readlock); /* single task reading lock */
init_waitqueue_head(&u->peer_wait);
init_waitqueue_func_entry(&u->peer_wake, unix_dgram_peer_wake_relay);
unix_insert_socket(unix_sockets_unbound(sk), sk);
out:
if (sk == NULL)
atomic_long_dec(&unix_nr_socks);
else {
local_bh_disable();
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
local_bh_enable();
}
return sk;
}
static int unix_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
if (protocol && protocol != PF_UNIX)
return -EPROTONOSUPPORT;
sock->state = SS_UNCONNECTED;
switch (sock->type) {
case SOCK_STREAM:
sock->ops = &unix_stream_ops;
break;
/*
	 *	Believe it or not, BSD has AF_UNIX, SOCK_RAW, though
* nothing uses it.
*/
case SOCK_RAW:
sock->type = SOCK_DGRAM;
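		/* fall through */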
case SOCK_DGRAM:
sock->ops = &unix_dgram_ops;
break;
case SOCK_SEQPACKET:
sock->ops = &unix_seqpacket_ops;
break;
default:
return -ESOCKTNOSUPPORT;
}
return unix_create1(net, sock, kern) ? 0 : -ENOMEM;
}
static int unix_release(struct socket *sock)
{
struct sock *sk = sock->sk;
if (!sk)
return 0;
unix_release_sock(sk, 0);
sock->sk = NULL;
return 0;
}
static int unix_autobind(struct socket *sock)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
static u32 ordernum = 1;
struct unix_address *addr;
int err;
unsigned int retries = 0;
err = mutex_lock_interruptible(&u->readlock);
if (err)
return err;
err = 0;
if (u->addr)
goto out;
err = -ENOMEM;
addr = kzalloc(sizeof(*addr) + sizeof(short) + 16, GFP_KERNEL);
if (!addr)
goto out;
addr->name->sun_family = AF_UNIX;
atomic_set(&addr->refcnt, 1);
retry:
addr->len = sprintf(addr->name->sun_path+1, "%05x", ordernum) + 1 + sizeof(short);
addr->hash = unix_hash_fold(csum_partial(addr->name, addr->len, 0));
spin_lock(&unix_table_lock);
ordernum = (ordernum+1)&0xFFFFF;
if (__unix_find_socket_byname(net, addr->name, addr->len, sock->type,
addr->hash)) {
spin_unlock(&unix_table_lock);
/*
		 * __unix_find_socket_byname() may take a long time if many names
* are already in use.
*/
cond_resched();
		/* Give up if all names seem to be in use. */
if (retries++ == 0xFFFFF) {
err = -ENOSPC;
kfree(addr);
goto out;
}
goto retry;
}
addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
u->addr = addr;
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
spin_unlock(&unix_table_lock);
err = 0;
out: mutex_unlock(&u->readlock);
return err;
}
static struct sock *unix_find_other(struct net *net,
struct sockaddr_un *sunname, int len,
int type, unsigned int hash, int *error)
{
struct sock *u;
struct path path;
int err = 0;
if (sunname->sun_path[0]) {
struct inode *inode;
err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
if (err)
goto fail;
inode = d_backing_inode(path.dentry);
err = inode_permission(inode, MAY_WRITE);
if (err)
goto put_fail;
err = -ECONNREFUSED;
if (!S_ISSOCK(inode->i_mode))
goto put_fail;
u = unix_find_socket_byinode(inode);
if (!u)
goto put_fail;
if (u->sk_type == type)
touch_atime(&path);
path_put(&path);
err = -EPROTOTYPE;
if (u->sk_type != type) {
sock_put(u);
goto fail;
}
} else {
err = -ECONNREFUSED;
u = unix_find_socket_byname(net, sunname, len, type, hash);
if (u) {
struct dentry *dentry;
dentry = unix_sk(u)->path.dentry;
if (dentry)
touch_atime(&unix_sk(u)->path);
} else
goto fail;
}
return u;
put_fail:
path_put(&path);
fail:
*error = err;
return NULL;
}
static int unix_mknod(struct dentry *dentry, struct path *path, umode_t mode,
struct path *res)
{
int err;
err = security_path_mknod(path, dentry, mode, 0);
if (!err) {
err = vfs_mknod(d_inode(path->dentry), dentry, mode, 0);
if (!err) {
res->mnt = mntget(path->mnt);
res->dentry = dget(dentry);
}
}
return err;
}
static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
char *sun_path = sunaddr->sun_path;
int err, name_err;
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
struct path path;
struct dentry *dentry;
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
err = unix_autobind(sock);
goto out;
}
err = unix_mkname(sunaddr, addr_len, &hash);
if (err < 0)
goto out;
addr_len = err;
name_err = 0;
dentry = NULL;
if (sun_path[0]) {
/* Get the parent directory, calculate the hash for last
* component.
*/
dentry = kern_path_create(AT_FDCWD, sun_path, &path, 0);
if (IS_ERR(dentry)) {
/* delay report until after 'already bound' check */
name_err = PTR_ERR(dentry);
dentry = NULL;
}
}
err = mutex_lock_interruptible(&u->readlock);
if (err)
goto out_path;
err = -EINVAL;
if (u->addr)
goto out_up;
if (name_err) {
err = name_err == -EEXIST ? -EADDRINUSE : name_err;
goto out_up;
}
err = -ENOMEM;
addr = kmalloc(sizeof(*addr)+addr_len, GFP_KERNEL);
if (!addr)
goto out_up;
memcpy(addr->name, sunaddr, addr_len);
addr->len = addr_len;
addr->hash = hash ^ sk->sk_type;
atomic_set(&addr->refcnt, 1);
if (dentry) {
struct path u_path;
umode_t mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current_umask());
err = unix_mknod(dentry, &path, mode, &u_path);
if (err) {
if (err == -EEXIST)
err = -EADDRINUSE;
unix_release_addr(addr);
goto out_up;
}
addr->hash = UNIX_HASH_SIZE;
hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
u->path = u_path;
list = &unix_socket_table[hash];
} else {
spin_lock(&unix_table_lock);
err = -EADDRINUSE;
if (__unix_find_socket_byname(net, sunaddr, addr_len,
sk->sk_type, hash)) {
unix_release_addr(addr);
goto out_unlock;
}
list = &unix_socket_table[addr->hash];
}
err = 0;
__unix_remove_socket(sk);
u->addr = addr;
__unix_insert_socket(list, sk);
out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->readlock);
out_path:
if (dentry)
done_path_create(&path, dentry);
out:
return err;
}
static void unix_state_double_lock(struct sock *sk1, struct sock *sk2)
{
if (unlikely(sk1 == sk2) || !sk2) {
unix_state_lock(sk1);
return;
}
if (sk1 < sk2) {
unix_state_lock(sk1);
unix_state_lock_nested(sk2);
} else {
unix_state_lock(sk2);
unix_state_lock_nested(sk1);
}
}
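/* Editorial note (not from the original source): taking the two state locks
 * in ascending address order is what makes concurrent A->B and B->A dgram
 * connects deadlock-free; the _nested annotation merely tells lockdep that
 * the second lock of the same class is intentional.
 */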
static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2)
{
if (unlikely(sk1 == sk2) || !sk2) {
unix_state_unlock(sk1);
return;
}
unix_state_unlock(sk1);
unix_state_unlock(sk2);
}
static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct sockaddr_un *sunaddr = (struct sockaddr_un *)addr;
struct sock *other;
unsigned int hash;
int err;
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
goto out;
alen = err;
if (test_bit(SOCK_PASSCRED, &sock->flags) &&
!unix_sk(sk)->addr && (err = unix_autobind(sock)) != 0)
goto out;
restart:
other = unix_find_other(net, sunaddr, alen, sock->type, hash, &err);
if (!other)
goto out;
unix_state_double_lock(sk, other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_double_unlock(sk, other);
sock_put(other);
goto restart;
}
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
} else {
/*
* 1003.1g breaking connected state with AF_UNSPEC
*/
other = NULL;
unix_state_double_lock(sk, other);
}
/*
* If it was connected, reconnect.
*/
if (unix_peer(sk)) {
struct sock *old_peer = unix_peer(sk);
unix_peer(sk) = other;
unix_dgram_peer_wake_disconnect_wakeup(sk, old_peer);
unix_state_double_unlock(sk, other);
if (other != old_peer)
unix_dgram_disconnected(sk, old_peer);
sock_put(old_peer);
} else {
unix_peer(sk) = other;
unix_state_double_unlock(sk, other);
}
return 0;
out_unlock:
unix_state_double_unlock(sk, other);
sock_put(other);
out:
return err;
}
static long unix_wait_for_peer(struct sock *other, long timeo)
{
struct unix_sock *u = unix_sk(other);
int sched;
DEFINE_WAIT(wait);
prepare_to_wait_exclusive(&u->peer_wait, &wait, TASK_INTERRUPTIBLE);
sched = !sock_flag(other, SOCK_DEAD) &&
!(other->sk_shutdown & RCV_SHUTDOWN) &&
unix_recvq_full(other);
unix_state_unlock(other);
if (sched)
timeo = schedule_timeout(timeo);
finish_wait(&u->peer_wait, &wait);
return timeo;
}
static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags)
{
struct sockaddr_un *sunaddr = (struct sockaddr_un *)uaddr;
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk), *newu, *otheru;
struct sock *newsk = NULL;
struct sock *other = NULL;
struct sk_buff *skb = NULL;
unsigned int hash;
int st;
int err;
long timeo;
err = unix_mkname(sunaddr, addr_len, &hash);
if (err < 0)
goto out;
addr_len = err;
if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr &&
(err = unix_autobind(sock)) != 0)
goto out;
timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
/* First of all allocate resources.
If we will make it after state is locked,
we will have to recheck all again in any case.
*/
err = -ENOMEM;
/* create new sock for complete connection */
newsk = unix_create1(sock_net(sk), NULL, 0);
if (newsk == NULL)
goto out;
/* Allocate skb for sending to listening sock */
skb = sock_wmalloc(newsk, 1, 0, GFP_KERNEL);
if (skb == NULL)
goto out;
restart:
/* Find listening sock. */
other = unix_find_other(net, sunaddr, addr_len, sk->sk_type, hash, &err);
if (!other)
goto out;
/* Latch state of peer */
unix_state_lock(other);
/* Apparently VFS overslept socket death. Retry. */
if (sock_flag(other, SOCK_DEAD)) {
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = -ECONNREFUSED;
if (other->sk_state != TCP_LISTEN)
goto out_unlock;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (unix_recvq_full(other)) {
err = -EAGAIN;
if (!timeo)
goto out_unlock;
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out;
sock_put(other);
goto restart;
}
/* Latch our state.
	   This is a tricky place. We need to grab our state lock and cannot
	   drop the lock on the peer. It is dangerous because deadlock is
	   possible. The connect-to-self case and simultaneous
	   attempts to connect are eliminated by checking socket
	   state: other is TCP_LISTEN, and if sk is TCP_LISTEN we
	   check this before attempting to grab the lock.
	   Well, and we have to recheck the state after the socket is locked.
*/
st = sk->sk_state;
switch (st) {
case TCP_CLOSE:
/* This is ok... continue with connect */
break;
case TCP_ESTABLISHED:
/* Socket is already connected */
err = -EISCONN;
goto out_unlock;
default:
err = -EINVAL;
goto out_unlock;
}
unix_state_lock_nested(sk);
if (sk->sk_state != st) {
unix_state_unlock(sk);
unix_state_unlock(other);
sock_put(other);
goto restart;
}
err = security_unix_stream_connect(sk, other, newsk);
if (err) {
unix_state_unlock(sk);
goto out_unlock;
}
	/* The way is open! Quickly set all the necessary fields... */
sock_hold(sk);
unix_peer(newsk) = sk;
newsk->sk_state = TCP_ESTABLISHED;
newsk->sk_type = sk->sk_type;
init_peercred(newsk);
newu = unix_sk(newsk);
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
/* copy address information from listening to new sock*/
if (otheru->addr) {
atomic_inc(&otheru->addr->refcnt);
newu->addr = otheru->addr;
}
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
/* Set credentials */
copy_peercred(sk, other);
sock->state = SS_CONNECTED;
sk->sk_state = TCP_ESTABLISHED;
sock_hold(newsk);
smp_mb__after_atomic(); /* sock_hold() does an atomic_inc() */
unix_peer(sk) = newsk;
unix_state_unlock(sk);
	/* take ten and send info to listening sock */
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, skb);
spin_unlock(&other->sk_receive_queue.lock);
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
return 0;
out_unlock:
if (other)
unix_state_unlock(other);
out:
kfree_skb(skb);
if (newsk)
unix_release_sock(newsk, 0);
if (other)
sock_put(other);
return err;
}
static int unix_socketpair(struct socket *socka, struct socket *sockb)
{
struct sock *ska = socka->sk, *skb = sockb->sk;
/* Join our sockets back to back */
sock_hold(ska);
sock_hold(skb);
unix_peer(ska) = skb;
unix_peer(skb) = ska;
init_peercred(ska);
init_peercred(skb);
if (ska->sk_type != SOCK_DGRAM) {
ska->sk_state = TCP_ESTABLISHED;
skb->sk_state = TCP_ESTABLISHED;
socka->state = SS_CONNECTED;
sockb->state = SS_CONNECTED;
}
return 0;
}
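/* For illustration only (not part of the original source): the back-to-back
 * join above is what implements socketpair(2); assuming <sys/socket.h> and
 * <unistd.h>:
 *
 *	int sv[2];
 *	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == 0) {
 *		write(sv[0], "ping", 4);	(data becomes readable on sv[1])
 *		close(sv[0]);
 *		close(sv[1]);
 *	}
 */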
static void unix_sock_inherit_flags(const struct socket *old,
struct socket *new)
{
if (test_bit(SOCK_PASSCRED, &old->flags))
set_bit(SOCK_PASSCRED, &new->flags);
if (test_bit(SOCK_PASSSEC, &old->flags))
set_bit(SOCK_PASSSEC, &new->flags);
}
static int unix_accept(struct socket *sock, struct socket *newsock, int flags)
{
struct sock *sk = sock->sk;
struct sock *tsk;
struct sk_buff *skb;
int err;
err = -EOPNOTSUPP;
if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET)
goto out;
err = -EINVAL;
if (sk->sk_state != TCP_LISTEN)
goto out;
/* If socket state is TCP_LISTEN it cannot change (for now...),
	 * so no locks are necessary.
*/
skb = skb_recv_datagram(sk, 0, flags&O_NONBLOCK, &err);
if (!skb) {
/* This means receive shutdown. */
if (err == 0)
err = -EINVAL;
goto out;
}
tsk = skb->sk;
skb_free_datagram(sk, skb);
wake_up_interruptible(&unix_sk(sk)->peer_wait);
/* attach accepted sock to socket */
unix_state_lock(tsk);
newsock->state = SS_CONNECTED;
unix_sock_inherit_flags(sock, newsock);
sock_graft(tsk, newsock);
unix_state_unlock(tsk);
return 0;
out:
return err;
}
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct unix_sock *u;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
if (peer) {
sk = unix_peer_get(sk);
err = -ENOTCONN;
if (!sk)
goto out;
err = 0;
} else {
sock_hold(sk);
}
u = unix_sk(sk);
unix_state_lock(sk);
if (!u->addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
*uaddr_len = sizeof(short);
} else {
struct unix_address *addr = u->addr;
*uaddr_len = addr->len;
memcpy(sunaddr, addr->name, *uaddr_len);
}
unix_state_unlock(sk);
sock_put(sk);
out:
return err;
}
static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
scm->fp = UNIXCB(skb).fp;
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
unix_notinflight(scm->fp->fp[i]);
}
static void unix_destruct_scm(struct sk_buff *skb)
{
struct scm_cookie scm;
memset(&scm, 0, sizeof(scm));
scm.pid = UNIXCB(skb).pid;
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
/* Alas, it calls VFS */
	/* So fscking what? fput() has been SMP-safe since last summer */
scm_destroy(&scm);
sock_wfree(skb);
}
/*
* The "user->unix_inflight" variable is protected by the garbage
* collection lock, and we just read it locklessly here. If you go
* over the limit, there might be a tiny race in actually noticing
* it across threads. Tough.
*/
static inline bool too_many_unix_fds(struct task_struct *p)
{
struct user_struct *user = current_user();
if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
return false;
}
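/* Editorial note (not from the original source): "inflight" counts files a
 * user has sitting inside SCM_RIGHTS messages that were queued but not yet
 * received. Capping it at RLIMIT_NOFILE keeps an unprivileged sender from
 * parking an unbounded number of file references in unread datagrams.
 */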
#define MAX_RECURSION_LEVEL 4
static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
{
int i;
unsigned char max_level = 0;
int unix_sock_count = 0;
if (too_many_unix_fds(current))
return -ETOOMANYREFS;
for (i = scm->fp->count - 1; i >= 0; i--) {
struct sock *sk = unix_get_socket(scm->fp->fp[i]);
if (sk) {
unix_sock_count++;
max_level = max(max_level,
unix_sk(sk)->recursion_level);
}
}
if (unlikely(max_level > MAX_RECURSION_LEVEL))
return -ETOOMANYREFS;
/*
* Need to duplicate file references for the sake of garbage
* collection. Otherwise a socket in the fps might become a
* candidate for GC while the skb is not yet queued.
*/
UNIXCB(skb).fp = scm_fp_dup(scm->fp);
if (!UNIXCB(skb).fp)
return -ENOMEM;
for (i = scm->fp->count - 1; i >= 0; i--)
unix_inflight(scm->fp->fp[i]);
return max_level;
}
static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
{
int err = 0;
UNIXCB(skb).pid = get_pid(scm->pid);
UNIXCB(skb).uid = scm->creds.uid;
UNIXCB(skb).gid = scm->creds.gid;
UNIXCB(skb).fp = NULL;
unix_get_secdata(scm, skb);
if (scm->fp && send_fds)
err = unix_attach_fds(scm, skb);
skb->destructor = unix_destruct_scm;
return err;
}
static bool unix_passcred_enabled(const struct socket *sock,
const struct sock *other)
{
return test_bit(SOCK_PASSCRED, &sock->flags) ||
!other->sk_socket ||
test_bit(SOCK_PASSCRED, &other->sk_socket->flags);
}
/*
* Some apps rely on write() giving SCM_CREDENTIALS
* We include credentials if source or destination socket
* asserted SOCK_PASSCRED.
*/
static void maybe_add_creds(struct sk_buff *skb, const struct socket *sock,
const struct sock *other)
{
if (UNIXCB(skb).pid)
return;
if (unix_passcred_enabled(sock, other)) {
UNIXCB(skb).pid = get_pid(task_tgid(current));
current_uid_gid(&UNIXCB(skb).uid, &UNIXCB(skb).gid);
}
}
static int maybe_init_creds(struct scm_cookie *scm,
struct socket *socket,
const struct sock *other)
{
int err;
struct msghdr msg = { .msg_controllen = 0 };
err = scm_send(socket, &msg, scm, false);
if (err)
return err;
if (unix_passcred_enabled(socket, other)) {
scm->pid = get_pid(task_tgid(current));
current_uid_gid(&scm->creds.uid, &scm->creds.gid);
}
return err;
}
static bool unix_skb_scm_eq(struct sk_buff *skb,
struct scm_cookie *scm)
{
const struct unix_skb_parms *u = &UNIXCB(skb);
return u->pid == scm->pid &&
uid_eq(u->uid, scm->creds.uid) &&
gid_eq(u->gid, scm->creds.gid) &&
unix_secdata_eq(scm, skb);
}
/*
* Send AF_UNIX data.
*/
static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
struct sock *other = NULL;
int namelen = 0; /* fake GCC */
int err;
unsigned int hash;
struct sk_buff *skb;
long timeo;
struct scm_cookie scm;
int max_level;
int data_len = 0;
int sk_locked;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out;
if (msg->msg_namelen) {
err = unix_mkname(sunaddr, msg->msg_namelen, &hash);
if (err < 0)
goto out;
namelen = err;
} else {
sunaddr = NULL;
err = -ENOTCONN;
other = unix_peer_get(sk);
if (!other)
goto out;
}
if (test_bit(SOCK_PASSCRED, &sock->flags) && !u->addr
&& (err = unix_autobind(sock)) != 0)
goto out;
err = -EMSGSIZE;
if (len > sk->sk_sndbuf - 32)
goto out;
if (len > SKB_MAX_ALLOC) {
data_len = min_t(size_t,
len - SKB_MAX_ALLOC,
MAX_SKB_FRAGS * PAGE_SIZE);
data_len = PAGE_ALIGN(data_len);
BUILD_BUG_ON(SKB_MAX_ALLOC < PAGE_SIZE);
}
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
PAGE_ALLOC_COSTLY_ORDER);
if (skb == NULL)
goto out;
err = unix_scm_to_skb(&scm, skb, true);
if (err < 0)
goto out_free;
max_level = err + 1;
skb_put(skb, len - data_len);
skb->data_len = data_len;
skb->len = len;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
if (err)
goto out_free;
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
restart:
if (!other) {
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
other = unix_find_other(net, sunaddr, namelen, sk->sk_type,
hash, &err);
if (other == NULL)
goto out_free;
}
if (sk_filter(other, skb) < 0) {
/* Toss the packet but do not return any error to the sender */
err = len;
goto out_free;
}
sk_locked = 0;
unix_state_lock(other);
restart_locked:
err = -EPERM;
if (!unix_may_send(sk, other))
goto out_unlock;
if (unlikely(sock_flag(other, SOCK_DEAD))) {
/*
		 * Check with 1003.1g - what should the
		 * datagram error be here?
*/
unix_state_unlock(other);
sock_put(other);
if (!sk_locked)
unix_state_lock(sk);
err = 0;
if (unix_peer(sk) == other) {
unix_peer(sk) = NULL;
unix_dgram_peer_wake_disconnect_wakeup(sk, other);
unix_state_unlock(sk);
unix_dgram_disconnected(sk, other);
sock_put(other);
err = -ECONNREFUSED;
} else {
unix_state_unlock(sk);
}
other = NULL;
if (err)
goto out_free;
goto restart;
}
err = -EPIPE;
if (other->sk_shutdown & RCV_SHUTDOWN)
goto out_unlock;
if (sk->sk_type != SOCK_SEQPACKET) {
err = security_unix_may_send(sk->sk_socket, other->sk_socket);
if (err)
goto out_unlock;
}
if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
err = sock_intr_errno(timeo);
if (signal_pending(current))
goto out_free;
goto restart;
}
if (!sk_locked) {
unix_state_unlock(other);
unix_state_double_lock(sk, other);
}
if (unix_peer(sk) != other ||
unix_dgram_peer_wake_me(sk, other)) {
err = -EAGAIN;
sk_locked = 1;
goto out_unlock;
}
if (!sk_locked) {
sk_locked = 1;
goto restart_locked;
}
}
if (unlikely(sk_locked))
unix_state_unlock(sk);
if (sock_flag(other, SOCK_RCVTSTAMP))
__net_timestamp(skb);
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other);
sock_put(other);
scm_destroy(&scm);
return len;
out_unlock:
if (sk_locked)
unix_state_unlock(sk);
unix_state_unlock(other);
out_free:
kfree_skb(skb);
out:
if (other)
sock_put(other);
scm_destroy(&scm);
return err;
}
/* We use paged skbs for stream sockets, and limit occupancy to 32768
 * bytes, and a minimum of a full page.
*/
#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
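/* Editorial note (not from the original source): with 4 KiB pages the
 * expression above is exactly 32 KiB (4096 << 3); on architectures with
 * pages of 32 KiB or more, get_order(32768) is 0 and the cap becomes a
 * single page.
 */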
static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
struct sock *sk = sock->sk;
struct sock *other = NULL;
int err, size;
struct sk_buff *skb;
int sent = 0;
struct scm_cookie scm;
bool fds_sent = false;
int max_level;
int data_len;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false);
if (err < 0)
return err;
err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB)
goto out_err;
if (msg->msg_namelen) {
err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
goto out_err;
} else {
err = -ENOTCONN;
other = unix_peer(sk);
if (!other)
goto out_err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto pipe_err;
while (sent < len) {
size = len - sent;
/* Keep two messages in the pipe so it schedules better */
size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
/* allow fallback to order-0 allocations */
size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
data_len = min_t(size_t, size, PAGE_ALIGN(data_len));
skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
msg->msg_flags & MSG_DONTWAIT, &err,
get_order(UNIX_SKB_FRAGS_SZ));
if (!skb)
goto out_err;
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(&scm, skb, !fds_sent);
if (err < 0) {
kfree_skb(skb);
goto out_err;
}
max_level = err + 1;
fds_sent = true;
skb_put(skb, size - data_len);
skb->data_len = data_len;
skb->len = size;
err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, size);
if (err) {
kfree_skb(skb);
goto out_err;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
(other->sk_shutdown & RCV_SHUTDOWN))
goto pipe_err_free;
maybe_add_creds(skb, sock, other);
skb_queue_tail(&other->sk_receive_queue, skb);
if (max_level > unix_sk(other)->recursion_level)
unix_sk(other)->recursion_level = max_level;
unix_state_unlock(other);
other->sk_data_ready(other);
sent += size;
}
scm_destroy(&scm);
return sent;
pipe_err_free:
unix_state_unlock(other);
kfree_skb(skb);
pipe_err:
if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
err = -EPIPE;
out_err:
scm_destroy(&scm);
return sent ? : err;
}
static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page,
int offset, size_t size, int flags)
{
int err;
bool send_sigpipe = false;
bool init_scm = true;
struct scm_cookie scm;
struct sock *other, *sk = socket->sk;
struct sk_buff *skb, *newskb = NULL, *tail = NULL;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
other = unix_peer(sk);
if (!other || sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
if (false) {
alloc_skb:
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->readlock);
newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT,
&err, 0);
if (!newskb)
goto err;
}
	/* we must acquire readlock as we modify skbs already present
	 * in the sk_receive_queue and mess with skb->len
*/
err = mutex_lock_interruptible(&unix_sk(other)->readlock);
if (err) {
err = flags & MSG_DONTWAIT ? -EAGAIN : -ERESTARTSYS;
goto err;
}
if (sk->sk_shutdown & SEND_SHUTDOWN) {
err = -EPIPE;
send_sigpipe = true;
goto err_unlock;
}
unix_state_lock(other);
if (sock_flag(other, SOCK_DEAD) ||
other->sk_shutdown & RCV_SHUTDOWN) {
err = -EPIPE;
send_sigpipe = true;
goto err_state_unlock;
}
if (init_scm) {
err = maybe_init_creds(&scm, socket, other);
if (err)
goto err_state_unlock;
init_scm = false;
}
skb = skb_peek_tail(&other->sk_receive_queue);
if (tail && tail == skb) {
skb = newskb;
} else if (!skb || !unix_skb_scm_eq(skb, &scm)) {
if (newskb) {
skb = newskb;
} else {
tail = skb;
goto alloc_skb;
}
} else if (newskb) {
		/* This is the fast path; the freshly allocated newskb is
		 * unused, so release it. (The newskb check is not strictly
		 * needed - consume_skb(NULL) would do no harm.)
*/
consume_skb(newskb);
newskb = NULL;
}
if (skb_append_pagefrags(skb, page, offset, size)) {
tail = skb;
goto alloc_skb;
}
skb->len += size;
skb->data_len += size;
skb->truesize += size;
atomic_add(size, &sk->sk_wmem_alloc);
if (newskb) {
err = unix_scm_to_skb(&scm, skb, false);
if (err)
goto err_state_unlock;
spin_lock(&other->sk_receive_queue.lock);
__skb_queue_tail(&other->sk_receive_queue, newskb);
spin_unlock(&other->sk_receive_queue.lock);
}
unix_state_unlock(other);
mutex_unlock(&unix_sk(other)->readlock);
other->sk_data_ready(other);
scm_destroy(&scm);
return size;
err_state_unlock:
unix_state_unlock(other);
err_unlock:
mutex_unlock(&unix_sk(other)->readlock);
err:
kfree_skb(newskb);
if (send_sigpipe && !(flags & MSG_NOSIGNAL))
send_sig(SIGPIPE, current, 0);
if (!init_scm)
scm_destroy(&scm);
return err;
}
static int unix_seqpacket_sendmsg(struct socket *sock, struct msghdr *msg,
size_t len)
{
int err;
struct sock *sk = sock->sk;
err = sock_error(sk);
if (err)
return err;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
if (msg->msg_namelen)
msg->msg_namelen = 0;
return unix_dgram_sendmsg(sock, msg, len);
}
static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct sock *sk = sock->sk;
if (sk->sk_state != TCP_ESTABLISHED)
return -ENOTCONN;
return unix_dgram_recvmsg(sock, msg, size, flags);
}
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
struct unix_sock *u = unix_sk(sk);
if (u->addr) {
msg->msg_namelen = u->addr->len;
memcpy(msg->msg_name, u->addr->name, u->addr->len);
}
}
static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct scm_cookie scm;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
struct sk_buff *skb, *last;
long timeo;
int err;
int peeked, skip;
err = -EOPNOTSUPP;
if (flags&MSG_OOB)
goto out;
timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
do {
mutex_lock(&u->readlock);
skip = sk_peek_offset(sk, flags);
skb = __skb_try_recv_datagram(sk, flags, &peeked, &skip, &err,
&last);
if (skb)
break;
mutex_unlock(&u->readlock);
if (err != -EAGAIN)
break;
} while (timeo &&
!__skb_wait_for_more_packets(sk, &err, &timeo, last));
if (!skb) { /* implies readlock unlocked */
unix_state_lock(sk);
/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
(sk->sk_shutdown & RCV_SHUTDOWN))
err = 0;
unix_state_unlock(sk);
goto out;
}
if (wq_has_sleeper(&u->peer_wait))
wake_up_interruptible_sync_poll(&u->peer_wait,
POLLOUT | POLLWRNORM |
POLLWRBAND);
if (msg->msg_name)
unix_copy_addr(msg, skb->sk);
if (size > skb->len - skip)
size = skb->len - skip;
else if (size < skb->len - skip)
msg->msg_flags |= MSG_TRUNC;
err = skb_copy_datagram_msg(skb, skip, msg, size);
if (err)
goto out_free;
if (sock_flag(sk, SOCK_RCVTSTAMP))
__sock_recv_timestamp(msg, sk, skb);
memset(&scm, 0, sizeof(scm));
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
if (!(flags & MSG_PEEK)) {
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
sk_peek_offset_bwd(sk, skb->len);
} else {
/* It is questionable: on PEEK we could:
		   - not return fds - good, but too simple 8)
- return fds, and do not return them on read (old strategy,
apparently wrong)
- clone fds (I chose it for now, it is the most universal
solution)
POSIX 1003.1g does not actually define this clearly
at all. POSIX 1003.1g doesn't define a lot of things
clearly however!
*/
sk_peek_offset_fwd(sk, size);
if (UNIXCB(skb).fp)
scm.fp = scm_fp_dup(UNIXCB(skb).fp);
}
err = (flags & MSG_TRUNC) ? skb->len - skip : size;
scm_recv(sock, msg, &scm, flags);
out_free:
skb_free_datagram(sk, skb);
mutex_unlock(&u->readlock);
out:
return err;
}
/*
* Sleep until more data has arrived. But check for races..
*/
static long unix_stream_data_wait(struct sock *sk, long timeo,
struct sk_buff *last, unsigned int last_len)
{
struct sk_buff *tail;
DEFINE_WAIT(wait);
unix_state_lock(sk);
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
tail = skb_peek_tail(&sk->sk_receive_queue);
if (tail != last ||
(tail && tail->len != last_len) ||
sk->sk_err ||
(sk->sk_shutdown & RCV_SHUTDOWN) ||
signal_pending(current) ||
!timeo)
break;
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
timeo = freezable_schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
break;
sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
}
finish_wait(sk_sleep(sk), &wait);
unix_state_unlock(sk);
return timeo;
}
static unsigned int unix_skb_len(const struct sk_buff *skb)
{
return skb->len - UNIXCB(skb).consumed;
}
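/* Editorial note (not from the original source): stream reads never split
 * an skb. UNIXCB(skb).consumed records how many bytes earlier reads have
 * already returned, the helper above reports what is left, and an skb is
 * unlinked from the queue only once it is fully consumed.
 */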
struct unix_stream_read_state {
int (*recv_actor)(struct sk_buff *, int, int,
struct unix_stream_read_state *);
struct socket *socket;
struct msghdr *msg;
struct pipe_inode_info *pipe;
size_t size;
int flags;
unsigned int splice_flags;
};
static int unix_stream_read_generic(struct unix_stream_read_state *state)
{
struct scm_cookie scm;
struct socket *sock = state->socket;
struct sock *sk = sock->sk;
struct unix_sock *u = unix_sk(sk);
int copied = 0;
int flags = state->flags;
int noblock = flags & MSG_DONTWAIT;
bool check_creds = false;
int target;
int err = 0;
long timeo;
int skip;
size_t size = state->size;
unsigned int last_len;
err = -EINVAL;
if (sk->sk_state != TCP_ESTABLISHED)
goto out;
err = -EOPNOTSUPP;
if (flags & MSG_OOB)
goto out;
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
memset(&scm, 0, sizeof(scm));
/* Lock the socket to prevent queue disordering
	 * while we sleep in memcpy_tomsg
*/
mutex_lock(&u->readlock);
if (flags & MSG_PEEK)
skip = sk_peek_offset(sk, flags);
else
skip = 0;
do {
int chunk;
bool drop_skb;
struct sk_buff *skb, *last;
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
goto unlock;
}
last = skb = skb_peek(&sk->sk_receive_queue);
last_len = last ? last->len : 0;
again:
if (skb == NULL) {
unix_sk(sk)->recursion_level = 0;
if (copied >= target)
goto unlock;
/*
* POSIX 1003.1g mandates this order.
*/
err = sock_error(sk);
if (err)
goto unlock;
if (sk->sk_shutdown & RCV_SHUTDOWN)
goto unlock;
unix_state_unlock(sk);
err = -EAGAIN;
if (!timeo)
break;
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last,
last_len);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
scm_destroy(&scm);
goto out;
}
mutex_lock(&u->readlock);
continue;
unlock:
unix_state_unlock(sk);
break;
}
while (skip >= unix_skb_len(skb)) {
skip -= unix_skb_len(skb);
last = skb;
last_len = skb->len;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
goto again;
}
unix_state_unlock(sk);
if (check_creds) {
/* Never glue messages from different writers */
if (!unix_skb_scm_eq(skb, &scm))
break;
} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {
/* Copy credentials */
scm_set_cred(&scm, UNIXCB(skb).pid, UNIXCB(skb).uid, UNIXCB(skb).gid);
unix_set_secdata(&scm, skb);
check_creds = true;
}
/* Copy address just once */
if (state->msg && state->msg->msg_name) {
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr,
state->msg->msg_name);
unix_copy_addr(state->msg, skb->sk);
sunaddr = NULL;
}
chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
skb_get(skb);
chunk = state->recv_actor(skb, skip, chunk, state);
drop_skb = !unix_skb_len(skb);
/* skb is only safe to use if !drop_skb */
consume_skb(skb);
if (chunk < 0) {
if (copied == 0)
copied = -EFAULT;
break;
}
copied += chunk;
size -= chunk;
if (drop_skb) {
/* the skb was touched by a concurrent reader;
* we should not expect anything from this skb
			 * anymore and assume it is invalid - we can be
* sure it was dropped from the socket queue
*
* let's report a short read
*/
err = 0;
break;
}
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(&scm, skb);
if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
consume_skb(skb);
if (scm.fp)
break;
} else {
/* It is questionable, see note in unix_dgram_recvmsg.
*/
if (UNIXCB(skb).fp)
scm.fp = scm_fp_dup(UNIXCB(skb).fp);
sk_peek_offset_fwd(sk, chunk);
if (UNIXCB(skb).fp)
break;
skip = 0;
last = skb;
last_len = skb->len;
unix_state_lock(sk);
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (skb)
goto again;
unix_state_unlock(sk);
break;
}
} while (size);
mutex_unlock(&u->readlock);
if (state->msg)
scm_recv(sock, state->msg, &scm, flags);
else
scm_destroy(&scm);
out:
return copied ? : err;
}
static int unix_stream_read_actor(struct sk_buff *skb,
int skip, int chunk,
struct unix_stream_read_state *state)
{
int ret;
ret = skb_copy_datagram_msg(skb, UNIXCB(skb).consumed + skip,
state->msg, chunk);
return ret ?: chunk;
}
static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t size, int flags)
{
struct unix_stream_read_state state = {
.recv_actor = unix_stream_read_actor,
.socket = sock,
.msg = msg,
.size = size,
.flags = flags
};
return unix_stream_read_generic(&state);
}
static ssize_t skb_unix_socket_splice(struct sock *sk,
struct pipe_inode_info *pipe,
struct splice_pipe_desc *spd)
{
int ret;
struct unix_sock *u = unix_sk(sk);
mutex_unlock(&u->readlock);
ret = splice_to_pipe(pipe, spd);
mutex_lock(&u->readlock);
return ret;
}
static int unix_stream_splice_actor(struct sk_buff *skb,
int skip, int chunk,
struct unix_stream_read_state *state)
{
return skb_splice_bits(skb, state->socket->sk,
UNIXCB(skb).consumed + skip,
state->pipe, chunk, state->splice_flags,
skb_unix_socket_splice);
}
static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t size, unsigned int flags)
{
struct unix_stream_read_state state = {
.recv_actor = unix_stream_splice_actor,
.socket = sock,
.pipe = pipe,
.size = size,
.splice_flags = flags,
};
if (unlikely(*ppos))
return -ESPIPE;
if (sock->file->f_flags & O_NONBLOCK ||
flags & SPLICE_F_NONBLOCK)
state.flags = MSG_DONTWAIT;
return unix_stream_read_generic(&state);
}
static int unix_shutdown(struct socket *sock, int mode)
{
struct sock *sk = sock->sk;
struct sock *other;
if (mode < SHUT_RD || mode > SHUT_RDWR)
return -EINVAL;
/* This maps:
* SHUT_RD (0) -> RCV_SHUTDOWN (1)
* SHUT_WR (1) -> SEND_SHUTDOWN (2)
* SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
*/
++mode;
unix_state_lock(sk);
sk->sk_shutdown |= mode;
other = unix_peer(sk);
if (other)
sock_hold(other);
unix_state_unlock(sk);
sk->sk_state_change(sk);
if (other &&
(sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET)) {
int peer_mode = 0;
if (mode&RCV_SHUTDOWN)
peer_mode |= SEND_SHUTDOWN;
if (mode&SEND_SHUTDOWN)
peer_mode |= RCV_SHUTDOWN;
unix_state_lock(other);
other->sk_shutdown |= peer_mode;
unix_state_unlock(other);
other->sk_state_change(other);
if (peer_mode == SHUTDOWN_MASK)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_HUP);
else if (peer_mode & RCV_SHUTDOWN)
sk_wake_async(other, SOCK_WAKE_WAITD, POLL_IN);
}
if (other)
sock_put(other);
return 0;
}
long unix_inq_len(struct sock *sk)
{
struct sk_buff *skb;
long amount = 0;
if (sk->sk_state == TCP_LISTEN)
return -EINVAL;
spin_lock(&sk->sk_receive_queue.lock);
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
amount += unix_skb_len(skb);
} else {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
amount = skb->len;
}
spin_unlock(&sk->sk_receive_queue.lock);
return amount;
}
EXPORT_SYMBOL_GPL(unix_inq_len);
long unix_outq_len(struct sock *sk)
{
return sk_wmem_alloc_get(sk);
}
EXPORT_SYMBOL_GPL(unix_outq_len);
static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
long amount = 0;
int err;
switch (cmd) {
case SIOCOUTQ:
amount = unix_outq_len(sk);
err = put_user(amount, (int __user *)arg);
break;
case SIOCINQ:
amount = unix_inq_len(sk);
if (amount < 0)
err = amount;
else
err = put_user(amount, (int __user *)arg);
break;
default:
err = -ENOIOCTLCMD;
break;
}
return err;
}
static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
unsigned int mask;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
if (sk->sk_err)
mask |= POLLERR;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
	/* Connection-based sockets need to check for termination and startup */
if ((sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) &&
sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/*
* we set writable also when the other side has shut down the
* connection. This prevents stuck sockets.
*/
if (unix_writable(sk))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
return mask;
}
static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk, *other;
unsigned int mask, writable;
sock_poll_wait(file, sk_sleep(sk), wait);
mask = 0;
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
mask |= POLLERR |
(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLRDHUP | POLLIN | POLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
mask |= POLLHUP;
/* readable? */
if (!skb_queue_empty(&sk->sk_receive_queue))
mask |= POLLIN | POLLRDNORM;
	/* Connection-based sockets need to check for termination and startup */
if (sk->sk_type == SOCK_SEQPACKET) {
if (sk->sk_state == TCP_CLOSE)
mask |= POLLHUP;
/* connection hasn't started yet? */
if (sk->sk_state == TCP_SYN_SENT)
return mask;
}
/* No write status requested, avoid expensive OUT tests. */
if (!(poll_requested_events(wait) & (POLLWRBAND|POLLWRNORM|POLLOUT)))
return mask;
writable = unix_writable(sk);
if (writable) {
unix_state_lock(sk);
other = unix_peer(sk);
if (other && unix_peer(other) != sk &&
unix_recvq_full(other) &&
unix_dgram_peer_wake_me(sk, other))
writable = 0;
unix_state_unlock(sk);
}
if (writable)
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
else
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
return mask;
}
#ifdef CONFIG_PROC_FS
#define BUCKET_SPACE (BITS_PER_LONG - (UNIX_HASH_BITS + 1) - 1)
#define get_bucket(x) ((x) >> BUCKET_SPACE)
#define get_offset(x) ((x) & ((1L << BUCKET_SPACE) - 1))
#define set_bucket_offset(b, o) ((b) << BUCKET_SPACE | (o))
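/* Editorial note (not from the original source): the iterator packs its
 * position into the seq_file *pos as (bucket << BUCKET_SPACE) | offset -
 * the high bits select the hash bucket, the low bits count sockets within
 * it - so resuming a read needs only the single loff_t cookie.
 */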
static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
{
unsigned long offset = get_offset(*pos);
unsigned long bucket = get_bucket(*pos);
struct sock *sk;
unsigned long count = 0;
for (sk = sk_head(&unix_socket_table[bucket]); sk; sk = sk_next(sk)) {
if (sock_net(sk) != seq_file_net(seq))
continue;
if (++count == offset)
break;
}
return sk;
}
static struct sock *unix_next_socket(struct seq_file *seq,
struct sock *sk,
loff_t *pos)
{
unsigned long bucket;
while (sk > (struct sock *)SEQ_START_TOKEN) {
sk = sk_next(sk);
if (!sk)
goto next_bucket;
if (sock_net(sk) == seq_file_net(seq))
return sk;
}
do {
sk = unix_from_bucket(seq, pos);
if (sk)
return sk;
next_bucket:
bucket = get_bucket(*pos) + 1;
*pos = set_bucket_offset(bucket, 1);
} while (bucket < ARRAY_SIZE(unix_socket_table));
return NULL;
}
static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(unix_table_lock)
{
spin_lock(&unix_table_lock);
if (!*pos)
return SEQ_START_TOKEN;
if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
return NULL;
return unix_next_socket(seq, NULL, pos);
}
static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return unix_next_socket(seq, v, pos);
}
static void unix_seq_stop(struct seq_file *seq, void *v)
__releases(unix_table_lock)
{
spin_unlock(&unix_table_lock);
}
static int unix_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_puts(seq, "Num RefCount Protocol Flags Type St "
"Inode Path\n");
else {
struct sock *s = v;
struct unix_sock *u = unix_sk(s);
unix_state_lock(s);
seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
s,
atomic_read(&s->sk_refcnt),
0,
s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
s->sk_type,
s->sk_socket ?
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTED : SS_UNCONNECTED) :
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
if (u->addr) {
int i, len;
seq_putc(seq, ' ');
i = 0;
len = u->addr->len - sizeof(short);
if (!UNIX_ABSTRACT(s))
len--;
else {
seq_putc(seq, '@');
i++;
}
for ( ; i < len; i++)
seq_putc(seq, u->addr->name->sun_path[i]);
}
unix_state_unlock(s);
seq_putc(seq, '\n');
}
return 0;
}
static const struct seq_operations unix_seq_ops = {
.start = unix_seq_start,
.next = unix_seq_next,
.stop = unix_seq_stop,
.show = unix_seq_show,
};
static int unix_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &unix_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations unix_seq_fops = {
.owner = THIS_MODULE,
.open = unix_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
#endif
static const struct net_proto_family unix_family_ops = {
.family = PF_UNIX,
.create = unix_create,
.owner = THIS_MODULE,
};
static int __net_init unix_net_init(struct net *net)
{
int error = -ENOMEM;
net->unx.sysctl_max_dgram_qlen = 10;
if (unix_sysctl_register(net))
goto out;
#ifdef CONFIG_PROC_FS
if (!proc_create("unix", 0, net->proc_net, &unix_seq_fops)) {
unix_sysctl_unregister(net);
goto out;
}
#endif
error = 0;
out:
return error;
}
static void __net_exit unix_net_exit(struct net *net)
{
unix_sysctl_unregister(net);
remove_proc_entry("unix", net->proc_net);
}
static struct pernet_operations unix_net_ops = {
.init = unix_net_init,
.exit = unix_net_exit,
};
static int __init af_unix_init(void)
{
int rc = -1;
BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb));
rc = proto_register(&unix_proto, 1);
if (rc != 0) {
pr_crit("%s: Cannot create unix_sock SLAB cache!\n", __func__);
goto out;
}
sock_register(&unix_family_ops);
register_pernet_subsys(&unix_net_ops);
out:
return rc;
}
static void __exit af_unix_exit(void)
{
sock_unregister(PF_UNIX);
proto_unregister(&unix_proto);
unregister_pernet_subsys(&unix_net_ops);
}
/* Earlier than device_initcall() so that other drivers invoking
request_module() don't end up in a loop when modprobe tries
to use a UNIX socket. But later than subsys_initcall() because
we depend on stuff initialised there */
fs_initcall(af_unix_init);
module_exit(af_unix_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_UNIX);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4966_3 |
crossvul-cpp_data_bad_3486_5 | /*
* linux/arch/arm/kernel/swp_emulate.c
*
* Copyright (C) 2009 ARM Limited
* __user_* functions adapted from include/asm/uaccess.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Implements emulation of the SWP/SWPB instructions using load-exclusive and
* store-exclusive for processors that have them disabled (or future ones that
* might not implement them).
*
* Syntax of SWP{B} instruction: SWP{B}<c> <Rt>, <Rt2>, [<Rn>]
* Where: Rt = destination
* Rt2 = source
* Rn = address
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/perf_event.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
/*
* Error-checking SWP macros implemented using ldrex{b}/strex{b}
*/
#define __user_swpX_asm(data, addr, res, temp, B) \
__asm__ __volatile__( \
" mov %2, %1\n" \
"0: ldrex"B" %1, [%3]\n" \
"1: strex"B" %0, %2, [%3]\n" \
" cmp %0, #0\n" \
" movne %0, %4\n" \
"2:\n" \
" .section .fixup,\"ax\"\n" \
" .align 2\n" \
"3: mov %0, %5\n" \
" b 2b\n" \
" .previous\n" \
" .section __ex_table,\"a\"\n" \
" .align 3\n" \
" .long 0b, 3b\n" \
" .long 1b, 3b\n" \
" .previous" \
: "=&r" (res), "+r" (data), "=&r" (temp) \
: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
: "cc", "memory")
#define __user_swp_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "")
#define __user_swpb_asm(data, addr, res, temp) \
__user_swpX_asm(data, addr, res, temp, "b")
/*
* Macros/defines for extracting register numbers from instruction.
*/
#define EXTRACT_REG_NUM(instruction, offset) \
(((instruction) & (0xf << (offset))) >> (offset))
#define RN_OFFSET 16
#define RT_OFFSET 12
#define RT2_OFFSET 0
/*
* Bit 22 of the instruction encoding distinguishes between
* the SWP and SWPB variants (bit set means SWPB).
*/
#define TYPE_SWPB (1 << 22)
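/*
 * Emulation statistics, exported below via /proc/cpu/swp_emulation;
 * previous_pid lets swp_handler() avoid logging the same process
 * over and over.
 */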
static unsigned long swpcounter;
static unsigned long swpbcounter;
static unsigned long abtcounter;
static pid_t previous_pid;
#ifdef CONFIG_PROC_FS
static int proc_read_status(char *page, char **start, off_t off, int count,
int *eof, void *data)
{
char *p = page;
int len;
p += sprintf(p, "Emulated SWP:\t\t%lu\n", swpcounter);
p += sprintf(p, "Emulated SWPB:\t\t%lu\n", swpbcounter);
p += sprintf(p, "Aborted SWP{B}:\t\t%lu\n", abtcounter);
if (previous_pid != 0)
p += sprintf(p, "Last process:\t\t%d\n", previous_pid);
len = (p - page) - off;
if (len < 0)
len = 0;
*eof = (len <= count) ? 1 : 0;
*start = page + off;
return len;
}
#endif
/*
* Set up process info to signal segmentation fault - called on access error.
*/
static void set_segfault(struct pt_regs *regs, unsigned long addr)
{
	siginfo_t info;
	/* find_vma() must be called with mmap_sem held for reading */
	down_read(&current->mm->mmap_sem);
	if (find_vma(current->mm, addr) == NULL)
		info.si_code = SEGV_MAPERR;
	else
		info.si_code = SEGV_ACCERR;
	up_read(&current->mm->mmap_sem);
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_addr = (void *) instruction_pointer(regs);
pr_debug("SWP{B} emulation: access caused memory abort!\n");
arm_notify_die("Illegal memory access", regs, &info, 0, 0);
abtcounter++;
}
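/*
 * Perform the actual exchange: retry while the exclusive reservation
 * keeps being lost (-EAGAIN) unless a signal is pending, and return 0
 * on success or -EFAULT when the user address cannot be accessed.
 * SWP (but not SWPB) additionally rejects unaligned addresses.
 */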
static int emulate_swpX(unsigned int address, unsigned int *data,
unsigned int type)
{
unsigned int res = 0;
if ((type != TYPE_SWPB) && (address & 0x3)) {
/* SWP to unaligned address not permitted */
pr_debug("SWP instruction on unaligned pointer!\n");
return -EFAULT;
}
while (1) {
unsigned long temp;
/*
		 * A barrier is required between accessing a protected resource
		 * and releasing a lock for it. Legacy code might not have done
		 * this, and we cannot determine whether that holds for the code
		 * being emulated, so insert one unconditionally.
*/
smp_mb();
if (type == TYPE_SWPB)
__user_swpb_asm(*data, address, res, temp);
else
__user_swp_asm(*data, address, res, temp);
if (likely(res != -EAGAIN) || signal_pending(current))
break;
cond_resched();
}
if (res == 0) {
/*
		 * A barrier is also required between acquiring a lock for a
		 * protected resource and accessing the resource. Inserted for
		 * the same reason as above.
*/
smp_mb();
if (type == TYPE_SWPB)
swpbcounter++;
else
swpcounter++;
}
return res;
}
/*
 * swp_handler logs the id of the calling process, dissects the instruction,
 * sanity-checks the memory location, calls emulate_swpX for the actual
 * operation and deals with fixup/error handling before returning.
*/
static int swp_handler(struct pt_regs *regs, unsigned int instr)
{
unsigned int address, destreg, data, type;
unsigned int res = 0;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, regs->ARM_pc);
if (current->pid != previous_pid) {
pr_debug("\"%s\" (%ld) uses deprecated SWP{B} instruction\n",
current->comm, (unsigned long)current->pid);
previous_pid = current->pid;
}
address = regs->uregs[EXTRACT_REG_NUM(instr, RN_OFFSET)];
data = regs->uregs[EXTRACT_REG_NUM(instr, RT2_OFFSET)];
destreg = EXTRACT_REG_NUM(instr, RT_OFFSET);
type = instr & TYPE_SWPB;
pr_debug("addr in r%d->0x%08x, dest is r%d, source in r%d->0x%08x)\n",
EXTRACT_REG_NUM(instr, RN_OFFSET), address,
destreg, EXTRACT_REG_NUM(instr, RT2_OFFSET), data);
	/* Check that the access lies in an acceptable range for both SWP and SWPB */
if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
pr_debug("SWP{B} emulation: access to %p not allowed!\n",
(void *)address);
res = -EFAULT;
} else {
res = emulate_swpX(address, &data, type);
}
if (res == 0) {
/*
* On successful emulation, revert the adjustment to the PC
* made in kernel/traps.c in order to resume execution at the
* instruction following the SWP{B}.
*/
regs->ARM_pc += 4;
regs->uregs[destreg] = data;
} else if (res == -EFAULT) {
/*
* Memory errors do not mean emulation failed.
* Set up signal info to return SEGV, then return OK
*/
set_segfault(regs, address);
}
return 0;
}
/*
* Only emulate SWP/SWPB executed in ARM state/User mode.
 * The kernel must be SWP-free, and SWP{B} does not exist in Thumb/ThumbEE.
*/
static struct undef_hook swp_hook = {
.instr_mask = 0x0fb00ff0,
.instr_val = 0x01000090,
.cpsr_mask = MODE_MASK | PSR_T_BIT | PSR_J_BIT,
.cpsr_val = USR_MODE,
.fn = swp_handler
};
/*
 * Register the handler and create a status file in /proc/cpu.
 * Invoked as a late_initcall, since it is not needed before init is spawned.
*/
static int __init swp_emulation_init(void)
{
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
res = create_proc_entry("cpu/swp_emulation", S_IRUGO, NULL);
if (!res)
return -ENOMEM;
res->read_proc = proc_read_status;
#endif /* CONFIG_PROC_FS */
printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n");
register_undef_hook(&swp_hook);
return 0;
}
late_initcall(swp_emulation_init);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3486_5 |
crossvul-cpp_data_bad_5006_1 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 Forwarding Information Base: FIB frontend.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <net/l3mdev.h>
#include <trace/events/fib.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES
static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
if (!main_table)
return -ENOMEM;
local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
if (!local_table)
goto fail;
hlist_add_head_rcu(&local_table->tb_hlist,
&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
hlist_add_head_rcu(&main_table->tb_hlist,
&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
return 0;
fail:
fib_free_table(main_table);
return -ENOMEM;
}
#else
struct fib_table *fib_new_table(struct net *net, u32 id)
{
struct fib_table *tb, *alias = NULL;
unsigned int h;
if (id == 0)
id = RT_TABLE_MAIN;
tb = fib_get_table(net, id);
if (tb)
return tb;
if (id == RT_TABLE_LOCAL)
alias = fib_new_table(net, RT_TABLE_MAIN);
tb = fib_trie_table(id, alias);
if (!tb)
return NULL;
switch (id) {
case RT_TABLE_LOCAL:
rcu_assign_pointer(net->ipv4.fib_local, tb);
break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, tb);
break;
case RT_TABLE_DEFAULT:
rcu_assign_pointer(net->ipv4.fib_default, tb);
break;
default:
break;
}
h = id & (FIB_TABLE_HASHSZ - 1);
hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
return tb;
}
/* caller must hold either rtnl or rcu read lock */
struct fib_table *fib_get_table(struct net *net, u32 id)
{
struct fib_table *tb;
struct hlist_head *head;
unsigned int h;
if (id == 0)
id = RT_TABLE_MAIN;
h = id & (FIB_TABLE_HASHSZ - 1);
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (tb->tb_id == id)
return tb;
}
return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
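/*
 * Publish a replacement table: refresh the RCU-protected shortcuts to
 * the well-known tables, then swap the entry in the per-netns hash
 * list.
 */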
static void fib_replace_table(struct net *net, struct fib_table *old,
struct fib_table *new)
{
#ifdef CONFIG_IP_MULTIPLE_TABLES
switch (new->tb_id) {
case RT_TABLE_LOCAL:
rcu_assign_pointer(net->ipv4.fib_local, new);
break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, new);
break;
case RT_TABLE_DEFAULT:
rcu_assign_pointer(net->ipv4.fib_default, new);
break;
default:
break;
}
#endif
/* replace the old table in the hlist */
hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
}
int fib_unmerge(struct net *net)
{
struct fib_table *old, *new;
/* attempt to fetch local table if it has been allocated */
old = fib_get_table(net, RT_TABLE_LOCAL);
if (!old)
return 0;
new = fib_trie_unmerge(old);
if (!new)
return -ENOMEM;
/* replace merged table with clean table */
if (new != old) {
fib_replace_table(net, old, new);
fib_free_table(old);
}
return 0;
}
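/*
 * Flush the route entries of every table in this namespace, and
 * invalidate the routing cache if anything was actually removed.
 */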
static void fib_flush(struct net *net)
{
int flushed = 0;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct hlist_node *tmp;
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
flushed += fib_table_flush(tb);
}
if (flushed)
rt_cache_flush(net);
}
void fib_flush_external(struct net *net)
{
struct fib_table *tb;
struct hlist_head *head;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry(tb, head, tb_hlist)
fib_table_flush_external(tb);
}
}
/*
 * Find the address type as if only "dev" were present in the system. If
 * dev is NULL then all interfaces are taken into consideration.
*/
static inline unsigned int __inet_dev_addr_type(struct net *net,
const struct net_device *dev,
__be32 addr, u32 tb_id)
{
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res;
unsigned int ret = RTN_BROADCAST;
struct fib_table *table;
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
return RTN_BROADCAST;
if (ipv4_is_multicast(addr))
return RTN_MULTICAST;
rcu_read_lock();
table = fib_get_table(net, tb_id);
if (table) {
ret = RTN_UNICAST;
if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
}
}
rcu_read_unlock();
return ret;
}
unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
{
return __inet_dev_addr_type(net, NULL, addr, tb_id);
}
EXPORT_SYMBOL(inet_addr_type_table);
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
}
EXPORT_SYMBOL(inet_addr_type);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr)
{
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
return __inet_dev_addr_type(net, dev, addr, rt_table);
}
EXPORT_SYMBOL(inet_dev_addr_type);
/* inet_addr_type with dev == NULL but using the table from a dev
* if one is associated
*/
unsigned int inet_addr_type_dev_table(struct net *net,
const struct net_device *dev,
__be32 addr)
{
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
return __inet_dev_addr_type(net, NULL, addr, rt_table);
}
EXPORT_SYMBOL(inet_addr_type_dev_table);
__be32 fib_compute_spec_dst(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct fib_result res;
struct rtable *rt;
struct flowi4 fl4;
struct net *net;
int scope;
rt = skb_rtable(skb);
if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
RTCF_LOCAL)
return ip_hdr(skb)->daddr;
in_dev = __in_dev_get_rcu(dev);
BUG_ON(!in_dev);
net = dev_net(dev);
scope = RT_SCOPE_UNIVERSE;
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
fl4.flowi4_oif = 0;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.daddr = ip_hdr(skb)->saddr;
fl4.saddr = 0;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_scope = scope;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
fl4.flowi4_tun_key.tun_id = 0;
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
} else {
scope = RT_SCOPE_LINK;
}
return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
}
/* Given (packet source, input interface) and optional (dst, oif, tos):
 * - (main) check that the source is valid, i.e. not broadcast or one of
 *   our local addresses.
 * - figure out on what "logical" interface this packet arrived
 *   and calculate the "specific destination" address.
 * - check that the packet arrived from the expected physical interface.
* called with rcu_read_lock()
*/
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
int rpf, struct in_device *idev, u32 *itag)
{
int ret, no_addr;
struct fib_result res;
struct flowi4 fl4;
struct net *net;
bool dev_match;
fl4.flowi4_oif = 0;
fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
if (!fl4.flowi4_iif)
fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
fl4.daddr = src;
fl4.saddr = dst;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
no_addr = idev->ifa_list == NULL;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
trace_fib_validate_source(dev, &fl4);
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res, 0))
goto last_resort;
if (res.type != RTN_UNICAST &&
(res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
goto e_inval;
if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
goto last_resort;
fib_combine_itag(itag, &res);
dev_match = false;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
for (ret = 0; ret < res.fi->fib_nhs; ret++) {
struct fib_nh *nh = &res.fi->fib_nh[ret];
if (nh->nh_dev == dev) {
dev_match = true;
break;
} else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
dev_match = true;
break;
}
}
#else
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
return ret;
}
if (no_addr)
goto last_resort;
if (rpf == 1)
goto e_rpf;
fl4.flowi4_oif = dev->ifindex;
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
}
return ret;
last_resort:
if (rpf)
goto e_rpf;
*itag = 0;
return 0;
e_inval:
return -EINVAL;
e_rpf:
return -EXDEV;
}
/* Ignore rp_filter for packets protected by IPsec. */
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag)
{
int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
IN_DEV_ACCEPT_LOCAL(idev) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
*itag = 0;
return 0;
}
return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
}
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
}
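/*
 * Append one RTAX_* metric as a raw netlink attribute and return the
 * new buffer length; rtentry_to_fib_config() sizes the buffer for at
 * most three such attributes.
 */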
static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
struct nlattr *nla;
nla = (struct nlattr *) ((char *) mx + len);
nla->nla_type = type;
nla->nla_len = nla_attr_size(4);
*(u32 *) nla_data(nla) = value;
return len + nla_total_size(4);
}
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
struct fib_config *cfg)
{
__be32 addr;
int plen;
memset(cfg, 0, sizeof(*cfg));
cfg->fc_nlinfo.nl_net = net;
if (rt->rt_dst.sa_family != AF_INET)
return -EAFNOSUPPORT;
/*
* Check mask for validity:
* a) it must be contiguous.
* b) destination must have all host bits clear.
 * c) if the application forgot to set the correct family (AF_INET),
 *    reject the request unless it is absolutely clear, i.e.
 *    both family and mask are zero.
*/
plen = 32;
addr = sk_extract_addr(&rt->rt_dst);
if (!(rt->rt_flags & RTF_HOST)) {
__be32 mask = sk_extract_addr(&rt->rt_genmask);
if (rt->rt_genmask.sa_family != AF_INET) {
if (mask || rt->rt_genmask.sa_family)
return -EAFNOSUPPORT;
}
if (bad_mask(mask, addr))
return -EINVAL;
plen = inet_mask_len(mask);
}
cfg->fc_dst_len = plen;
cfg->fc_dst = addr;
if (cmd != SIOCDELRT) {
cfg->fc_nlflags = NLM_F_CREATE;
cfg->fc_protocol = RTPROT_BOOT;
}
if (rt->rt_metric)
cfg->fc_priority = rt->rt_metric - 1;
if (rt->rt_flags & RTF_REJECT) {
cfg->fc_scope = RT_SCOPE_HOST;
cfg->fc_type = RTN_UNREACHABLE;
return 0;
}
cfg->fc_scope = RT_SCOPE_NOWHERE;
cfg->fc_type = RTN_UNICAST;
if (rt->rt_dev) {
char *colon;
struct net_device *dev;
char devname[IFNAMSIZ];
if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
return -EFAULT;
devname[IFNAMSIZ-1] = 0;
colon = strchr(devname, ':');
if (colon)
*colon = 0;
dev = __dev_get_by_name(net, devname);
if (!dev)
return -ENODEV;
cfg->fc_oif = dev->ifindex;
if (colon) {
struct in_ifaddr *ifa;
struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
return -ENODEV;
*colon = ':';
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
if (strcmp(ifa->ifa_label, devname) == 0)
break;
if (!ifa)
return -ENODEV;
cfg->fc_prefsrc = ifa->ifa_local;
}
}
addr = sk_extract_addr(&rt->rt_gateway);
if (rt->rt_gateway.sa_family == AF_INET && addr) {
unsigned int addr_type;
cfg->fc_gw = addr;
addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
if (rt->rt_flags & RTF_GATEWAY &&
addr_type == RTN_UNICAST)
cfg->fc_scope = RT_SCOPE_UNIVERSE;
}
if (cmd == SIOCDELRT)
return 0;
if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
return -EINVAL;
if (cfg->fc_scope == RT_SCOPE_NOWHERE)
cfg->fc_scope = RT_SCOPE_LINK;
if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
struct nlattr *mx;
int len = 0;
mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
if (!mx)
return -ENOMEM;
if (rt->rt_flags & RTF_MTU)
len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
if (rt->rt_flags & RTF_WINDOW)
len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
if (rt->rt_flags & RTF_IRTT)
len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
cfg->fc_mx = mx;
cfg->fc_mx_len = len;
}
return 0;
}
/*
* Handle IP routing ioctl calls.
* These are used to manipulate the routing tables
*/
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct fib_config cfg;
struct rtentry rt;
int err;
switch (cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
rtnl_lock();
err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
if (err == 0) {
struct fib_table *tb;
if (cmd == SIOCDELRT) {
tb = fib_get_table(net, cfg.fc_table);
if (tb)
err = fib_table_delete(tb, &cfg);
else
err = -ESRCH;
} else {
tb = fib_new_table(net, cfg.fc_table);
if (tb)
err = fib_table_insert(tb, &cfg);
else
err = -ENOBUFS;
}
/* allocated by rtentry_to_fib_config() */
kfree(cfg.fc_mx);
}
rtnl_unlock();
return err;
}
return -EINVAL;
}
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_SRC] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_GATEWAY] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_PREFSRC] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
};
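/*
 * Translate an RTM_NEWROUTE/RTM_DELROUTE request into a struct
 * fib_config, after validating its attributes against rtm_ipv4_policy.
 */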
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
struct nlmsghdr *nlh, struct fib_config *cfg)
{
struct nlattr *attr;
int err, remaining;
struct rtmsg *rtm;
err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
if (err < 0)
goto errout;
memset(cfg, 0, sizeof(*cfg));
rtm = nlmsg_data(nlh);
cfg->fc_dst_len = rtm->rtm_dst_len;
cfg->fc_tos = rtm->rtm_tos;
cfg->fc_table = rtm->rtm_table;
cfg->fc_protocol = rtm->rtm_protocol;
cfg->fc_scope = rtm->rtm_scope;
cfg->fc_type = rtm->rtm_type;
cfg->fc_flags = rtm->rtm_flags;
cfg->fc_nlflags = nlh->nlmsg_flags;
cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = net;
if (cfg->fc_type > RTN_MAX) {
err = -EINVAL;
goto errout;
}
nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
switch (nla_type(attr)) {
case RTA_DST:
cfg->fc_dst = nla_get_be32(attr);
break;
case RTA_OIF:
cfg->fc_oif = nla_get_u32(attr);
break;
case RTA_GATEWAY:
cfg->fc_gw = nla_get_be32(attr);
break;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
case RTA_PREFSRC:
cfg->fc_prefsrc = nla_get_be32(attr);
break;
case RTA_METRICS:
cfg->fc_mx = nla_data(attr);
cfg->fc_mx_len = nla_len(attr);
break;
case RTA_MULTIPATH:
cfg->fc_mp = nla_data(attr);
cfg->fc_mp_len = nla_len(attr);
break;
case RTA_FLOW:
cfg->fc_flow = nla_get_u32(attr);
break;
case RTA_TABLE:
cfg->fc_table = nla_get_u32(attr);
break;
case RTA_ENCAP:
cfg->fc_encap = attr;
break;
case RTA_ENCAP_TYPE:
cfg->fc_encap_type = nla_get_u16(attr);
break;
}
}
return 0;
errout:
return err;
}
static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
struct fib_table *tb;
int err;
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
tb = fib_get_table(net, cfg.fc_table);
if (!tb) {
err = -ESRCH;
goto errout;
}
err = fib_table_delete(tb, &cfg);
errout:
return err;
}
static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
struct fib_table *tb;
int err;
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
tb = fib_new_table(net, cfg.fc_table);
if (!tb) {
err = -ENOBUFS;
goto errout;
}
err = fib_table_insert(tb, &cfg);
errout:
return err;
}
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
int dumped = 0;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
return skb->len;
s_h = cb->args[0];
s_e = cb->args[1];
rcu_read_lock();
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (e < s_e)
goto next;
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
if (fib_table_dump(tb, skb, cb) < 0)
goto out;
dumped = 1;
next:
e++;
}
}
out:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
return skb->len;
}
/* Prepare and feed an intra-kernel routing request.
 * Really, it should be a netlink message, but :-( netlink
 * may not be configured, so we feed the request directly
 * to the fib engine. This is legal, because all events occur
 * only while netlink is already locked.
*/
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
struct net *net = dev_net(ifa->ifa_dev->dev);
u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
struct fib_table *tb;
struct fib_config cfg = {
.fc_protocol = RTPROT_KERNEL,
.fc_type = type,
.fc_dst = dst,
.fc_dst_len = dst_len,
.fc_prefsrc = ifa->ifa_local,
.fc_oif = ifa->ifa_dev->dev->ifindex,
.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
.fc_nlinfo = {
.nl_net = net,
},
};
if (!tb_id)
tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
tb = fib_new_table(net, tb_id);
if (!tb)
return;
cfg.fc_table = tb->tb_id;
if (type != RTN_LOCAL)
cfg.fc_scope = RT_SCOPE_LINK;
else
cfg.fc_scope = RT_SCOPE_HOST;
if (cmd == RTM_NEWROUTE)
fib_table_insert(tb, &cfg);
else
fib_table_delete(tb, &cfg);
}
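/*
 * Install the local, broadcast and prefix routes implied by a newly
 * added interface address; for a secondary address the routes are
 * attached to its primary.
 */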
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
struct in_device *in_dev = ifa->ifa_dev;
struct net_device *dev = in_dev->dev;
struct in_ifaddr *prim = ifa;
__be32 mask = ifa->ifa_mask;
__be32 addr = ifa->ifa_local;
__be32 prefix = ifa->ifa_address & mask;
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, prefix, mask);
if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
}
fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
if (!(dev->flags & IFF_UP))
return;
/* Add broadcast address, if it is explicitly assigned. */
if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
fib_magic(RTM_NEWROUTE,
dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
prefix, ifa->ifa_prefixlen, prim);
		/* Add network-specific broadcasts, when it makes sense */
if (ifa->ifa_prefixlen < 31) {
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
32, prim);
}
}
}
/* Delete a primary or secondary address.
 * Optionally, on secondary address promotion, consider the addresses
 * from the subnet of iprim as deleted, even if they are in the device
 * list. In this case the secondary ifa can be in the device list.
*/
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
struct in_device *in_dev = ifa->ifa_dev;
struct net_device *dev = in_dev->dev;
struct in_ifaddr *ifa1;
struct in_ifaddr *prim = ifa, *prim1 = NULL;
__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK 1
#define BRD_OK 2
#define BRD0_OK 4
#define BRD1_OK 8
unsigned int ok = 0;
int subnet = 0; /* Primary network */
int gone = 1; /* Address is missing */
int same_prefsrc = 0; /* Another primary with same IP */
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
pr_warn("%s: bug: iprim != prim\n", __func__);
return;
}
} else if (!ipv4_is_zeronet(any) &&
(any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
fib_magic(RTM_DELROUTE,
dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
any, ifa->ifa_prefixlen, prim);
subnet = 1;
}
	/* Deletion is more complicated than adding.
	 * We should take care not to delete too much :-)
	 *
	 * Scan the address list to be sure that the addresses are really gone.
*/
for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
if (ifa1 == ifa) {
/* promotion, keep the IP */
gone = 0;
continue;
}
/* Ignore IFAs from our subnet */
if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, iprim))
continue;
/* Ignore ifa1 if it uses different primary IP (prefsrc) */
if (ifa1->ifa_flags & IFA_F_SECONDARY) {
/* Another address from our subnet? */
if (ifa1->ifa_mask == prim->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, prim))
prim1 = prim;
else {
/* We reached the secondaries, so
* same_prefsrc should be determined.
*/
if (!same_prefsrc)
continue;
/* Search new prim1 if ifa1 is not
* using the current prim1
*/
if (!prim1 ||
ifa1->ifa_mask != prim1->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, prim1))
prim1 = inet_ifa_byprefix(in_dev,
ifa1->ifa_address,
ifa1->ifa_mask);
if (!prim1)
continue;
if (prim1->ifa_local != prim->ifa_local)
continue;
}
} else {
if (prim->ifa_local != ifa1->ifa_local)
continue;
prim1 = ifa1;
if (prim != prim1)
same_prefsrc = 1;
}
if (ifa->ifa_local == ifa1->ifa_local)
ok |= LOCAL_OK;
if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
ok |= BRD_OK;
if (brd == ifa1->ifa_broadcast)
ok |= BRD1_OK;
if (any == ifa1->ifa_broadcast)
ok |= BRD0_OK;
/* primary has network specific broadcasts */
if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;
if (!ipv4_is_zeronet(any1)) {
if (ifa->ifa_broadcast == brd1 ||
ifa->ifa_broadcast == any1)
ok |= BRD_OK;
if (brd == brd1 || brd == any1)
ok |= BRD1_OK;
if (any == brd1 || any == any1)
ok |= BRD0_OK;
}
}
}
if (!(ok & BRD_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
if (subnet && ifa->ifa_prefixlen < 31) {
if (!(ok & BRD1_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
if (!(ok & BRD0_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
}
if (!(ok & LOCAL_OK)) {
unsigned int addr_type;
fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
/* Check, that this local address finally disappeared. */
addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
ifa->ifa_local);
if (gone && addr_type != RTN_LOCAL) {
			/* And last, but not least:
			 * we must flush stray FIB entries.
			 *
			 * First of all, we scan the fib_info list searching
			 * for stray nexthop entries, then ignite fib_flush.
*/
if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
fib_flush(dev_net(dev));
}
}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
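/*
 * Resolve one lookup request received on the NETLINK_FIB_LOOKUP
 * socket, filling in the result fields of the fib_result_nl that
 * nl_fib_input() echoes back to the caller.
 */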
static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
{
struct fib_result res;
struct flowi4 fl4 = {
.flowi4_mark = frn->fl_mark,
.daddr = frn->fl_addr,
.flowi4_tos = frn->fl_tos,
.flowi4_scope = frn->fl_scope,
};
struct fib_table *tb;
rcu_read_lock();
tb = fib_get_table(net, frn->tb_id_in);
frn->err = -ENOENT;
if (tb) {
local_bh_disable();
frn->tb_id = tb->tb_id;
frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
if (!frn->err) {
frn->prefixlen = res.prefixlen;
frn->nh_sel = res.nh_sel;
frn->type = res.type;
frn->scope = res.scope;
}
local_bh_enable();
}
rcu_read_unlock();
}
static void nl_fib_input(struct sk_buff *skb)
{
struct net *net;
struct fib_result_nl *frn;
struct nlmsghdr *nlh;
u32 portid;
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
skb = netlink_skb_clone(skb, GFP_KERNEL);
if (!skb)
return;
nlh = nlmsg_hdr(skb);
frn = (struct fib_result_nl *) nlmsg_data(nlh);
nl_fib_lookup(net, frn);
portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
}
static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
struct netlink_kernel_cfg cfg = {
.input = nl_fib_input,
};
sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
if (!sk)
return -EAFNOSUPPORT;
net->ipv4.fibnl = sk;
return 0;
}
static void nl_fib_lookup_exit(struct net *net)
{
netlink_kernel_release(net->ipv4.fibnl);
net->ipv4.fibnl = NULL;
}
static void fib_disable_ip(struct net_device *dev, unsigned long event,
bool force)
{
if (fib_sync_down_dev(dev, event, force))
fib_flush(dev_net(dev));
rt_cache_flush(dev_net(dev));
arp_ifdown(dev);
}
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct net *net = dev_net(dev);
switch (event) {
case NETDEV_UP:
fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
rt_cache_flush(dev_net(dev));
break;
case NETDEV_DOWN:
fib_del_ifaddr(ifa, NULL);
atomic_inc(&net->ipv4.dev_addr_genid);
if (!ifa->ifa_dev->ifa_list) {
/* Last address was deleted from this interface.
* Disable IP.
*/
fib_disable_ip(dev, event, true);
} else {
rt_cache_flush(dev_net(dev));
}
break;
}
return NOTIFY_DONE;
}
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, event, true);
rt_flush_dev(dev);
return NOTIFY_DONE;
}
in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
for_ifa(in_dev) {
fib_add_ifaddr(ifa);
} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
rt_cache_flush(net);
break;
case NETDEV_DOWN:
fib_disable_ip(dev, event, false);
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
/* fall through */
case NETDEV_CHANGEMTU:
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
if (info->upper_dev && netif_is_l3_master(info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block fib_inetaddr_notifier = {
.notifier_call = fib_inetaddr_event,
};
static struct notifier_block fib_netdev_notifier = {
.notifier_call = fib_netdev_event,
};
static int __net_init ip_fib_net_init(struct net *net)
{
int err;
size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
	/* Avoid false sharing: use at least a full cache line */
size = max_t(size_t, size, L1_CACHE_BYTES);
net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
if (!net->ipv4.fib_table_hash)
return -ENOMEM;
err = fib4_rules_init(net);
if (err < 0)
goto fail;
return 0;
fail:
kfree(net->ipv4.fib_table_hash);
return err;
}
static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[i];
struct hlist_node *tmp;
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
fib_table_flush(tb);
fib_free_table(tb);
}
}
#ifdef CONFIG_IP_MULTIPLE_TABLES
fib4_rules_exit(net);
#endif
rtnl_unlock();
kfree(net->ipv4.fib_table_hash);
}
static int __net_init fib_net_init(struct net *net)
{
int error;
#ifdef CONFIG_IP_ROUTE_CLASSID
net->ipv4.fib_num_tclassid_users = 0;
#endif
error = ip_fib_net_init(net);
if (error < 0)
goto out;
error = nl_fib_lookup_init(net);
if (error < 0)
goto out_nlfl;
error = fib_proc_init(net);
if (error < 0)
goto out_proc;
out:
return error;
out_proc:
nl_fib_lookup_exit(net);
out_nlfl:
ip_fib_net_exit(net);
goto out;
}
static void __net_exit fib_net_exit(struct net *net)
{
fib_proc_exit(net);
nl_fib_lookup_exit(net);
ip_fib_net_exit(net);
}
static struct pernet_operations fib_net_ops = {
.init = fib_net_init,
.exit = fib_net_exit,
};
void __init ip_fib_init(void)
{
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
register_pernet_subsys(&fib_net_ops);
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
fib_trie_init();
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5006_1 |
crossvul-cpp_data_bad_5587_0 | /*
* Resizable virtual memory filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
* 2000-2001 Christoph Rohland
* 2000-2001 SAP AG
* 2002 Red Hat Inc.
* Copyright (C) 2002-2011 Hugh Dickins.
* Copyright (C) 2011 Google Inc.
* Copyright (C) 2002-2005 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs
*
* Extended attribute support for tmpfs:
* Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
* tiny-shmem:
* Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
*
* This file is released under the GPL.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
static struct vfsmount *shm_mnt;
#ifdef CONFIG_SHMEM
/*
* This virtual memory filesystem is heavily based on the ramfs. It
* extends ramfs by the ability to use swap and honor resource limits
* which makes it a completely usable filesystem.
*/
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
/*
* shmem_fallocate and shmem_writepage communicate via inode->i_private
* (with i_mutex making sure that it has only one user at a time):
* we would prefer not to enlarge the shmem inode just for that.
*/
struct shmem_falloc {
pgoff_t start; /* start of range currently being fallocated */
pgoff_t next; /* the next page offset to be fallocated */
pgoff_t nr_falloced; /* how many new pages have been fallocated */
pgoff_t nr_unswapped; /* how often writepage refused to swap out */
};
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
return totalram_pages / 2;
}
static unsigned long shmem_default_max_inodes(void)
{
return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
static inline int shmem_getpage(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, int *fault_type)
{
return shmem_getpage_gfp(inode, index, pagep, sgp,
mapping_gfp_mask(inode->i_mapping), fault_type);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
/*
* shmem_file_setup pre-accounts the whole fixed size of a VM object,
* for shared memory and for shared anonymous (/dev/zero) mappings
* (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
* consistent with the pre-accounting of private mappings ...
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
return (flags & VM_NORESERVE) ?
0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
if (!(flags & VM_NORESERVE))
vm_unacct_memory(VM_ACCT(size));
}
/*
* ... whereas tmpfs objects are accounted incrementally as
* pages are allocated, in order to allow huge sparse files.
* shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
* so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
*/
static inline int shmem_acct_block(unsigned long flags)
{
return (flags & VM_NORESERVE) ?
security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
if (flags & VM_NORESERVE)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static int shmem_reserve_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock);
if (!sbinfo->free_inodes) {
spin_unlock(&sbinfo->stat_lock);
return -ENOSPC;
}
sbinfo->free_inodes--;
spin_unlock(&sbinfo->stat_lock);
}
return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock);
}
}
/**
* shmem_recalc_inode - recalculate the block usage of an inode
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
* undirtied hole pages behind our back.
*
* But normally info->alloced == inode->i_mapping->nrpages + info->swapped
* So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
*
* It has to be called with the spinlock held.
*/
static void shmem_recalc_inode(struct inode *inode)
{
struct shmem_inode_info *info = SHMEM_I(inode);
long freed;
freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
if (freed > 0) {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -freed);
info->alloced -= freed;
inode->i_blocks -= freed * BLOCKS_PER_PAGE;
shmem_unacct_blocks(info->flags, freed);
}
}
/*
* Replace item expected in radix tree by a new item, while holding tree lock.
*/
static int shmem_radix_tree_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
{
void **pslot;
void *item = NULL;
VM_BUG_ON(!expected);
pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
if (pslot)
item = radix_tree_deref_slot_protected(pslot,
&mapping->tree_lock);
if (item != expected)
return -ENOENT;
if (replacement)
radix_tree_replace_slot(pslot, replacement);
else
radix_tree_delete(&mapping->page_tree, index);
return 0;
}
/*
* Sometimes, before we decide whether to proceed or to fail, we must check
* that an entry was not already brought back from swap by a racing thread.
*
* Checking page is not enough: by the time a SwapCache page is locked, it
* might be reused, and again be SwapCache, using the same swap as before.
*/
static bool shmem_confirm_swap(struct address_space *mapping,
pgoff_t index, swp_entry_t swap)
{
void *item;
rcu_read_lock();
item = radix_tree_lookup(&mapping->page_tree, index);
rcu_read_unlock();
return item == swp_to_radix_entry(swap);
}
/*
* Like add_to_page_cache_locked, but error if expected item has gone.
*/
static int shmem_add_to_page_cache(struct page *page,
struct address_space *mapping,
pgoff_t index, gfp_t gfp, void *expected)
{
int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapBacked(page));
page_cache_get(page);
page->mapping = mapping;
page->index = index;
spin_lock_irq(&mapping->tree_lock);
if (!expected)
error = radix_tree_insert(&mapping->page_tree, index, page);
else
error = shmem_radix_tree_replace(mapping, index, expected,
page);
if (!error) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
spin_unlock_irq(&mapping->tree_lock);
page_cache_release(page);
}
return error;
}
/*
* Like delete_from_page_cache, but substitutes swap for page.
*/
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
struct address_space *mapping = page->mapping;
int error;
spin_lock_irq(&mapping->tree_lock);
error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
page->mapping = NULL;
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
__dec_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
page_cache_release(page);
BUG_ON(error);
}
/*
* Like find_get_pages, but collecting swap entries as well as pages.
*/
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
pgoff_t start, unsigned int nr_pages,
struct page **pages, pgoff_t *indices)
{
void **slot;
unsigned int ret = 0;
struct radix_tree_iter iter;
if (!nr_pages)
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page))
goto restart;
/*
* Otherwise, we must be storing a swap entry
* here as an exceptional entry: so return it
* without attempting to raise page count.
*/
goto export;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
export:
indices[ret] = iter.index;
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
/*
* Remove swap entry from radix tree, free the swap and its page cache.
*/
static int shmem_free_swap(struct address_space *mapping,
pgoff_t index, void *radswap)
{
int error;
spin_lock_irq(&mapping->tree_lock);
error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
spin_unlock_irq(&mapping->tree_lock);
if (!error)
free_swap_and_cache(radix_to_swp_entry(radswap));
return error;
}
/*
* Pagevec may contain swap entries, so shuffle up pages before releasing.
*/
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
int i, j;
for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
if (!radix_tree_exceptional_entry(page))
pvec->pages[j++] = page;
}
pvec->nr = j;
}
/*
* SysV IPC SHM_UNLOCK restore Unevictable pages to their evictable lists.
*/
void shmem_unlock_mapping(struct address_space *mapping)
{
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index = 0;
pagevec_init(&pvec, 0);
/*
* Minor point, but we might as well stop if someone else SHM_LOCKs it.
*/
while (!mapping_unevictable(mapping)) {
/*
* Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
* has finished, if it hits a row of PAGEVEC_SIZE swap entries.
*/
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
PAGEVEC_SIZE, pvec.pages, indices);
if (!pvec.nr)
break;
index = indices[pvec.nr - 1] + 1;
shmem_deswap_pagevec(&pvec);
check_move_unevictable_pages(pvec.pages, pvec.nr);
pagevec_release(&pvec);
cond_resched();
}
}
/*
* Remove range of pages and swap entries from radix tree, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
bool unfalloc)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0;
pgoff_t index;
int i;
if (lend == -1)
end = -1; /* unsigned, so actually very big */
pagevec_init(&pvec, 0);
index = start;
while (index < end) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr)
break;
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
index = indices[i];
if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
if (!trylock_page(page))
continue;
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
index++;
}
if (partial_start) {
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
if (page) {
unsigned int top = PAGE_CACHE_SIZE;
if (start > end) {
top = partial_end;
partial_end = 0;
}
zero_user_segment(page, partial_start, top);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
if (partial_end) {
struct page *page = NULL;
shmem_getpage(inode, end, &page, SGP_READ, NULL);
if (page) {
zero_user_segment(page, 0, partial_end);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
if (start >= end)
return;
index = start;
for ( ; ; ) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
if (index == start || unfalloc)
break;
index = start;
continue;
}
if ((index == start || unfalloc) && indices[0] >= end) {
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
break;
}
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
index = indices[i];
if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
lock_page(page);
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
index++;
}
spin_lock(&info->lock);
info->swapped -= nr_swaps_freed;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
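/*
 * Apply attribute changes; when a file shrinks, unmap the dying range,
 * truncate it, then unmap once more to catch pages that were racily
 * COWed into private mappings in between.
 */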
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
loff_t oldsize = inode->i_size;
loff_t newsize = attr->ia_size;
if (newsize != oldsize) {
i_size_write(inode, newsize);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
if (newsize < oldsize) {
loff_t holebegin = round_up(newsize, PAGE_SIZE);
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
shmem_truncate_range(inode, newsize, (loff_t)-1);
/* unmap again to remove racily COWed private pages */
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}
}
setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (attr->ia_valid & ATTR_MODE)
error = generic_acl_chmod(inode);
#endif
return error;
}
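/*
 * Final teardown of a tmpfs inode: drop the size accounting, truncate
 * every page, unhook the inode from the swaplist and free its xattrs.
 */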
static void shmem_evict_inode(struct inode *inode)
{
struct shmem_inode_info *info = SHMEM_I(inode);
if (inode->i_mapping->a_ops == &shmem_aops) {
shmem_unacct_size(info->flags, inode->i_size);
inode->i_size = 0;
shmem_truncate_range(inode, 0, (loff_t)-1);
if (!list_empty(&info->swaplist)) {
mutex_lock(&shmem_swaplist_mutex);
list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
} else
kfree(info->symlink);
simple_xattrs_free(&info->xattrs);
WARN_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
/*
* If swap found in inode, free it and move page from swapcache to filecache.
*/
static int shmem_unuse_inode(struct shmem_inode_info *info,
swp_entry_t swap, struct page **pagep)
{
struct address_space *mapping = info->vfs_inode.i_mapping;
void *radswap;
pgoff_t index;
gfp_t gfp;
int error = 0;
radswap = swp_to_radix_entry(swap);
index = radix_tree_locate_item(&mapping->page_tree, radswap);
if (index == -1)
return 0;
/*
	 * Move the list _head_ so that the next search starts from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * the mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
*/
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
gfp = mapping_gfp_mask(mapping);
if (shmem_should_replace_page(*pagep, gfp)) {
mutex_unlock(&shmem_swaplist_mutex);
error = shmem_replace_page(pagep, gfp, info, index);
mutex_lock(&shmem_swaplist_mutex);
/*
* We needed to drop mutex to make that restrictive page
* allocation, but the inode might have been freed while we
* dropped it: although a racing shmem_evict_inode() cannot
* complete without emptying the radix_tree, our page lock
* on this swapcache page is not enough to prevent that -
* free_swap_and_cache() of our swap entry will only
* trylock_page(), removing swap from radix_tree whatever.
*
* We must not proceed to shmem_add_to_page_cache() if the
* inode has been freed, but of course we cannot rely on
* inode or mapping or info to check that. However, we can
* safely check if our swap entry is still in use (and here
* it can't have got reused for another page): if it's still
* in use, then the inode cannot have been freed yet, and we
* can safely proceed (if it's no longer in use, that tells
* nothing about the inode, but we don't need to unuse swap).
*/
if (!page_swapcount(*pagep))
error = -ENOENT;
}
/*
* We rely on shmem_swaplist_mutex, not only to protect the swaplist,
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
if (!error)
error = shmem_add_to_page_cache(*pagep, mapping, index,
GFP_NOWAIT, radswap);
if (error != -ENOMEM) {
/*
* Truncation and eviction use free_swap_and_cache(), which
* only does trylock page: if we raced, best clean up here.
*/
delete_from_swap_cache(*pagep);
set_page_dirty(*pagep);
if (!error) {
spin_lock(&info->lock);
info->swapped--;
spin_unlock(&info->lock);
swap_free(swap);
}
error = 1; /* not an error, but entry was found */
}
return error;
}
/*
* Search through swapped inodes to find and replace swap by page.
*/
int shmem_unuse(swp_entry_t swap, struct page *page)
{
struct list_head *this, *next;
struct shmem_inode_info *info;
int found = 0;
int error = 0;
/*
* There's a faint possibility that swap page was replaced before
* caller locked it: caller will come back later with the right page.
*/
if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
goto out;
/*
* Charge page using GFP_KERNEL while we can wait, before taking
* the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to the caller) when swap
	 * accounting is used.
*/
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
if (error)
goto out;
/* No radix_tree_preload: swap entry keeps a place for page in tree */
mutex_lock(&shmem_swaplist_mutex);
list_for_each_safe(this, next, &shmem_swaplist) {
info = list_entry(this, struct shmem_inode_info, swaplist);
if (info->swapped)
found = shmem_unuse_inode(info, swap, &page);
else
list_del_init(&info->swaplist);
cond_resched();
if (found)
break;
}
mutex_unlock(&shmem_swaplist_mutex);
if (found < 0)
error = found;
out:
unlock_page(page);
page_cache_release(page);
return error;
}
/*
* Move the page from the page cache to the swap cache.
*/
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
struct shmem_inode_info *info;
struct address_space *mapping;
struct inode *inode;
swp_entry_t swap;
pgoff_t index;
BUG_ON(!PageLocked(page));
mapping = page->mapping;
index = page->index;
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
if (!total_swap_pages)
goto redirty;
/*
* shmem_backing_dev_info's capabilities prevent regular writeback or
* sync from ever calling shmem_writepage; but a stacking filesystem
* might use ->writepage of its underlying filesystem, in which case
* tmpfs should write out to swap only in response to memory pressure,
* and not for the writeback threads or sync.
*/
if (!wbc->for_reclaim) {
WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
goto redirty;
}
/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
* value into swapfile.c, the only way we can correctly account for a
* fallocated page arriving here is now to initialize it and write it.
*
* That's okay for a page already fallocated earlier, but if we have
* not yet completed the fallocation, then (a) we want to keep track
* of this page in case we have to undo it, and (b) it may not be a
* good idea to continue anyway, once we're pushing into swap. So
	 * reactivate the page, and let shmem_fallocate() quit when too many
	 * of its pages turn out to need swapping out.
*/
if (!PageUptodate(page)) {
if (inode->i_private) {
struct shmem_falloc *shmem_falloc;
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
if (shmem_falloc &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
shmem_falloc->nr_unswapped++;
else
shmem_falloc = NULL;
spin_unlock(&inode->i_lock);
if (shmem_falloc)
goto redirty;
}
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
}
swap = get_swap_page();
if (!swap.val)
goto redirty;
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
* if it's not already there. Do it now before the page is
* moved to swap cache, when its pagelock no longer protects
* the inode from eviction. But don't unlock the mutex until
* we've incremented swapped, because shmem_unuse_inode() will
* prune a !swapped inode from the swaplist under this mutex.
*/
mutex_lock(&shmem_swaplist_mutex);
if (list_empty(&info->swaplist))
list_add_tail(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
swap_shmem_alloc(swap);
shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
spin_lock(&info->lock);
info->swapped++;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
}
mutex_unlock(&shmem_swaplist_mutex);
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
if (wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
unlock_page(page);
return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
char buffer[64];
if (!mpol || mpol->mode == MPOL_DEFAULT)
return; /* show nothing */
mpol_to_str(buffer, sizeof(buffer), mpol);
seq_printf(seq, ",mpol=%s", buffer);
}
static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
struct mempolicy *mpol = NULL;
if (sbinfo->mpol) {
spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
mpol = sbinfo->mpol;
mpol_get(mpol);
spin_unlock(&sbinfo->stat_lock);
}
return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
page = swapin_readahead(swap, gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
}
static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
page = alloc_page_vma(gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */
static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
return swapin_readahead(swap, gfp, NULL, 0);
}
static inline struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
return NULL;
}
#endif
/*
* When a page is moved from swapcache to shmem filecache (either by the
* usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
* shmem_unuse_inode()), it may have been read in earlier from swap, in
* ignorance of the mapping it belongs to. If that mapping has special
* constraints (like the gma500 GEM driver, which requires RAM below 4GB),
* we may need to copy to a suitable page before moving to filecache.
*
* In a future release, this may well be extended to respect cpuset and
* NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
* but for now it is a simple matter of zone.
*/
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
return page_zonenum(page) > gfp_zone(gfp);
}
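/*
 * Example for shmem_should_replace_page() above (illustrative): a
 * gma500-style mapping whose gfp resolves to a low zone must not keep
 * a swapcache page that was read into ZONE_NORMAL or ZONE_HIGHMEM;
 * page_zonenum(page) > gfp_zone(gfp) catches exactly that case.
 */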
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct page *oldpage, *newpage;
struct address_space *swap_mapping;
pgoff_t swap_index;
int error;
oldpage = *pagep;
swap_index = page_private(oldpage);
swap_mapping = page_mapping(oldpage);
/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success by further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
newpage = shmem_alloc_page(gfp, info, index);
if (!newpage)
return -ENOMEM;
page_cache_get(newpage);
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
__set_page_locked(newpage);
SetPageUptodate(newpage);
SetPageSwapBacked(newpage);
set_page_private(newpage, swap_index);
SetPageSwapCache(newpage);
/*
* Our caller will very soon move newpage out of swapcache, but it's
* a nice clean interface for us to replace oldpage by newpage there.
*/
spin_lock_irq(&swap_mapping->tree_lock);
error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
newpage);
if (!error) {
__inc_zone_page_state(newpage, NR_FILE_PAGES);
__dec_zone_page_state(oldpage, NR_FILE_PAGES);
}
spin_unlock_irq(&swap_mapping->tree_lock);
if (unlikely(error)) {
/*
* Is this possible? I think not, now that our callers check
* both PageSwapCache and page_private after getting page lock;
* but be defensive. Reverse old to newpage for clear and free.
*/
oldpage = newpage;
} else {
mem_cgroup_replace_page_cache(oldpage, newpage);
lru_cache_add_anon(newpage);
*pagep = newpage;
}
ClearPageSwapCache(oldpage);
set_page_private(oldpage, 0);
unlock_page(oldpage);
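	/*
	 * Two releases: one drops the reference the swap cache held on
	 * oldpage (newpage took over that role above), the other drops
	 * our caller's original reference via *pagep.
	 */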
page_cache_release(oldpage);
page_cache_release(oldpage);
return error;
}
/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
*/
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo;
struct page *page;
swp_entry_t swap;
int error;
int once = 0;
int alloced = 0;
if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
return -EFBIG;
repeat:
swap.val = 0;
page = find_lock_page(mapping, index);
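	/* shmem stores swapped-out pages as exceptional radix-tree entries */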
if (radix_tree_exceptional_entry(page)) {
swap = radix_to_swp_entry(page);
page = NULL;
}
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
goto failed;
}
/* fallocated page? */
if (page && !PageUptodate(page)) {
if (sgp != SGP_READ)
goto clear;
unlock_page(page);
page_cache_release(page);
page = NULL;
}
if (page || (sgp == SGP_READ && !swap.val)) {
*pagep = page;
return 0;
}
/*
* Fast cache lookup did not find it:
* bring it back from swap or allocate.
*/
info = SHMEM_I(inode);
sbinfo = SHMEM_SB(inode->i_sb);
if (swap.val) {
/* Look it up and read it in.. */
page = lookup_swap_cache(swap);
if (!page) {
/* here we actually do the io */
if (fault_type)
*fault_type |= VM_FAULT_MAJOR;
page = shmem_swapin(swap, gfp, info, index);
if (!page) {
error = -ENOMEM;
goto failed;
}
}
/* We have to do this with page locked to prevent races */
lock_page(page);
if (!PageSwapCache(page) || page_private(page) != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST; /* try again */
goto unlock;
}
if (!PageUptodate(page)) {
error = -EIO;
goto failed;
}
wait_on_page_writeback(page);
if (shmem_should_replace_page(page, gfp)) {
error = shmem_replace_page(&page, gfp, info, index);
if (error)
goto failed;
}
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, swp_to_radix_entry(swap));
/*
* We already confirmed swap under page lock, and make
* no memory allocation here, so usually no possibility
* of error; but free_swap_and_cache() only trylocks a
* page, so it is just possible that the entry has been
* truncated or holepunched since swap was confirmed.
* shmem_undo_range() will have done some of the
* unaccounting, now delete_from_swap_cache() will do
* the rest (including mem_cgroup_uncharge_swapcache).
* Reset swap.val? No, leave it so "failed" goes back to
* "repeat": reading a hole and writing should succeed.
*/
if (error)
delete_from_swap_cache(page);
}
if (error)
goto failed;
spin_lock(&info->lock);
info->swapped--;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
delete_from_swap_cache(page);
set_page_dirty(page);
swap_free(swap);
} else {
if (shmem_acct_block(info->flags)) {
error = -ENOSPC;
goto failed;
}
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
sbinfo->max_blocks) >= 0) {
error = -ENOSPC;
goto unacct;
}
percpu_counter_inc(&sbinfo->used_blocks);
}
page = shmem_alloc_page(gfp, info, index);
if (!page) {
error = -ENOMEM;
goto decused;
}
SetPageSwapBacked(page);
__set_page_locked(page);
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (error)
goto decused;
error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, NULL);
radix_tree_preload_end();
}
if (error) {
mem_cgroup_uncharge_cache_page(page);
goto decused;
}
lru_cache_add_anon(page);
spin_lock(&info->lock);
info->alloced++;
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
alloced = true;
/*
* Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
*/
if (sgp == SGP_FALLOC)
sgp = SGP_WRITE;
clear:
/*
* Let SGP_WRITE caller clear ends if write does not fill page;
* but SGP_FALLOC on a page fallocated earlier must initialize
* it now, lest undo on failure cancel our earlier guarantee.
*/
if (sgp != SGP_WRITE) {
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
}
if (sgp == SGP_DIRTY)
set_page_dirty(page);
}
/* Perhaps the file has been truncated since we checked */
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
if (alloced)
goto trunc;
else
goto failed;
}
*pagep = page;
return 0;
/*
* Error recovery.
*/
trunc:
info = SHMEM_I(inode);
ClearPageDirty(page);
delete_from_page_cache(page);
spin_lock(&info->lock);
info->alloced--;
inode->i_blocks -= BLOCKS_PER_PAGE;
spin_unlock(&info->lock);
decused:
sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
shmem_unacct_blocks(info->flags, 1);
failed:
if (swap.val && error != -EINVAL &&
!shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
unlock:
if (page) {
unlock_page(page);
page_cache_release(page);
}
if (error == -ENOSPC && !once++) {
info = SHMEM_I(inode);
spin_lock(&info->lock);
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
goto repeat;
}
if (error == -EEXIST) /* from above or from radix_tree_insert */
goto repeat;
return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
int error;
int ret = VM_FAULT_LOCKED;
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
if (ret & VM_FAULT_MAJOR) {
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
}
return ret;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}
static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
pgoff_t index;
index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
int retval = -ENOMEM;
spin_lock(&info->lock);
if (lock && !(info->flags & VM_LOCKED)) {
if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
info->flags |= VM_LOCKED;
mapping_set_unevictable(file->f_mapping);
}
if (!lock && (info->flags & VM_LOCKED) && user) {
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
mapping_clear_unevictable(file->f_mapping);
}
retval = 0;
out_nomem:
spin_unlock(&info->lock);
return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (shmem_reserve_inode(sb))
return NULL;
inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
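		/* zero the shmem-specific fields preceding the embedded vfs_inode */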
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy,
shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
inode->i_size = 2 * BOGO_DIRENT_SIZE;
inode->i_op = &shmem_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
break;
case S_IFLNK:
/*
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
mpol_shared_policy_init(&info->policy, NULL);
break;
}
} else
shmem_free_inode(sb);
return inode;
}
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;
#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
if (!PageUptodate(page)) {
if (copied < PAGE_CACHE_SIZE) {
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
zero_user_segments(page, 0, from,
from + copied, PAGE_CACHE_SIZE);
}
SetPageUptodate(page);
}
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos,
			       read_descriptor_t *desc, read_actor_t actor)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
unsigned long offset;
enum sgp_type sgp = SGP_READ;
/*
* Might this read be for a stacking filesystem? Then when reading
* holes of a sparse file, we actually need to allocate those pages,
* and even mark them dirty, so it cannot exceed the max_blocks limit.
*/
if (segment_eq(get_fs(), KERNEL_DS))
sgp = SGP_DIRTY;
index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
for (;;) {
struct page *page = NULL;
pgoff_t end_index;
unsigned long nr, ret;
loff_t i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
if (index > end_index)
break;
if (index == end_index) {
nr = i_size & ~PAGE_CACHE_MASK;
if (nr <= offset)
break;
}
desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
if (desc->error) {
if (desc->error == -EINVAL)
desc->error = 0;
break;
}
if (page)
unlock_page(page);
/*
		 * We must re-check i_size after getting the page, since reads
		 * (unlike writes) are called without i_mutex protection
		 * against truncate.
*/
nr = PAGE_CACHE_SIZE;
i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
if (index == end_index) {
nr = i_size & ~PAGE_CACHE_MASK;
if (nr <= offset) {
if (page)
page_cache_release(page);
break;
}
}
nr -= offset;
if (page) {
/*
* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* Mark the page accessed if we read the beginning.
*/
if (!offset)
mark_page_accessed(page);
} else {
page = ZERO_PAGE(0);
page_cache_get(page);
}
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*
* The actor routine returns how many bytes were actually used..
* NOTE! This may not be the same as how much of a user buffer
* we filled up (we may be padding etc), so we can only update
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
ret = actor(desc, page, offset, nr);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
page_cache_release(page);
if (ret != nr || !desc->count)
break;
cond_resched();
}
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
ssize_t retval;
unsigned long seg;
size_t count;
loff_t *ppos = &iocb->ki_pos;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
for (seg = 0; seg < nr_segs; seg++) {
read_descriptor_t desc;
desc.written = 0;
desc.arg.buf = iov[seg].iov_base;
desc.count = iov[seg].iov_len;
if (desc.count == 0)
continue;
desc.error = 0;
do_shmem_file_read(filp, ppos, &desc, file_read_actor);
retval += desc.written;
if (desc.error) {
retval = retval ?: desc.error;
break;
}
if (desc.count > 0)
break;
}
return retval;
}
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
struct inode *inode = mapping->host;
unsigned int loff, nr_pages, req_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct page *page;
pgoff_t index, end_index;
loff_t isize, left;
int error, page_nr;
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &page_cache_pipe_buf_ops,
.spd_release = spd_release_page,
};
isize = i_size_read(inode);
if (unlikely(*ppos >= isize))
return 0;
left = isize - *ppos;
if (unlikely(left < len))
len = left;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nr_pages = min(req_pages, pipe->buffers);
spd.nr_pages = find_get_pages_contig(mapping, index,
nr_pages, spd.pages);
index += spd.nr_pages;
error = 0;
while (spd.nr_pages < nr_pages) {
error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
if (error)
break;
unlock_page(page);
spd.pages[spd.nr_pages++] = page;
index++;
}
index = *ppos >> PAGE_CACHE_SHIFT;
nr_pages = spd.nr_pages;
spd.nr_pages = 0;
for (page_nr = 0; page_nr < nr_pages; page_nr++) {
unsigned int this_len;
if (!len)
break;
this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
page = spd.pages[page_nr];
if (!PageUptodate(page) || page->mapping != mapping) {
error = shmem_getpage(inode, index, &page,
SGP_CACHE, NULL);
if (error)
break;
unlock_page(page);
page_cache_release(spd.pages[page_nr]);
spd.pages[page_nr] = page;
}
isize = i_size_read(inode);
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(!isize || index > end_index))
break;
if (end_index == index) {
unsigned int plen;
plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
if (plen <= loff)
break;
this_len = min(this_len, plen - loff);
len = this_len;
}
spd.partial[page_nr].offset = loff;
spd.partial[page_nr].len = this_len;
len -= this_len;
loff = 0;
spd.nr_pages++;
index++;
}
while (page_nr < nr_pages)
page_cache_release(spd.pages[page_nr++]);
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
if (error > 0) {
*ppos += error;
file_accessed(in);
}
return error;
}
/*
* llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
*/
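/*
 * Illustrative userspace usage (not part of this file): walking the
 * data extents of a sparse tmpfs file with lseek(2):
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);
 *	off_t hole = lseek(fd, data, SEEK_HOLE);
 */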
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
pgoff_t index, pgoff_t end, int whence)
{
struct page *page;
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
bool done = false;
int i;
pagevec_init(&pvec, 0);
pvec.nr = 1; /* start small: we may be there already */
while (!done) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
pvec.nr, pvec.pages, indices);
if (!pvec.nr) {
if (whence == SEEK_DATA)
index = end;
break;
}
for (i = 0; i < pvec.nr; i++, index++) {
if (index < indices[i]) {
if (whence == SEEK_HOLE) {
done = true;
break;
}
index = indices[i];
}
page = pvec.pages[i];
if (page && !radix_tree_exceptional_entry(page)) {
if (!PageUptodate(page))
page = NULL;
}
if (index >= end ||
(page && whence == SEEK_DATA) ||
(!page && whence == SEEK_HOLE)) {
done = true;
break;
}
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
pvec.nr = PAGEVEC_SIZE;
cond_resched();
}
return index;
}
static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
pgoff_t start, end;
loff_t new_offset;
if (whence != SEEK_DATA && whence != SEEK_HOLE)
return generic_file_llseek_size(file, offset, whence,
MAX_LFS_FILESIZE, i_size_read(inode));
mutex_lock(&inode->i_mutex);
/* We're holding i_mutex so we can access i_size directly */
if (offset < 0)
offset = -EINVAL;
else if (offset >= inode->i_size)
offset = -ENXIO;
else {
start = offset >> PAGE_CACHE_SHIFT;
end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
new_offset = shmem_seek_hole_data(mapping, start, end, whence);
new_offset <<= PAGE_CACHE_SHIFT;
if (new_offset > offset) {
if (new_offset < inode->i_size)
offset = new_offset;
else if (whence == SEEK_DATA)
offset = -ENXIO;
else
offset = inode->i_size;
}
}
if (offset >= 0 && offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
}
mutex_unlock(&inode->i_mutex);
return offset;
}
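/*
 * Illustrative userspace usage (not part of this file): punching a hole
 * versus preallocating pages with fallocate(2):
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len);
 *	fallocate(fd, 0, off, len);	 // preallocate
 */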
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_falloc shmem_falloc;
pgoff_t start, index, end;
int error;
mutex_lock(&inode->i_mutex);
if (mode & FALLOC_FL_PUNCH_HOLE) {
struct address_space *mapping = file->f_mapping;
loff_t unmap_start = round_up(offset, PAGE_SIZE);
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
if ((u64)unmap_end > (u64)unmap_start)
unmap_mapping_range(mapping, unmap_start,
1 + unmap_end - unmap_start, 0);
shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */
error = 0;
goto out;
}
/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
error = inode_newsize_ok(inode, offset + len);
if (error)
goto out;
start = offset >> PAGE_CACHE_SHIFT;
end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
/* Try to avoid a swapstorm if len is impossible to satisfy */
if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
error = -ENOSPC;
goto out;
}
shmem_falloc.start = start;
shmem_falloc.next = start;
shmem_falloc.nr_falloced = 0;
shmem_falloc.nr_unswapped = 0;
spin_lock(&inode->i_lock);
inode->i_private = &shmem_falloc;
spin_unlock(&inode->i_lock);
for (index = start; index < end; index++) {
struct page *page;
/*
* Good, the fallocate(2) manpage permits EINTR: we may have
* been interrupted because we are using up too much memory.
*/
if (signal_pending(current))
error = -EINTR;
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
else
error = shmem_getpage(inode, index, &page, SGP_FALLOC,
NULL);
if (error) {
/* Remove the !PageUptodate pages we added */
shmem_undo_range(inode,
(loff_t)start << PAGE_CACHE_SHIFT,
(loff_t)index << PAGE_CACHE_SHIFT, true);
goto undone;
}
/*
* Inform shmem_writepage() how far we have reached.
* No need for lock or barrier: we have the page lock.
*/
shmem_falloc.next++;
if (!PageUptodate(page))
shmem_falloc.nr_falloced++;
/*
* If !PageUptodate, leave it that way so that freeable pages
* can be recognized if we need to rollback on error later.
* But set_page_dirty so that memory pressure will swap rather
* than free the pages we are allocating (and SGP_CACHE pages
* might still be clean: we now need to mark those dirty too).
*/
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
cond_resched();
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
inode->i_ctime = CURRENT_TIME;
undone:
spin_lock(&inode->i_lock);
inode->i_private = NULL;
spin_unlock(&inode->i_lock);
out:
mutex_unlock(&inode->i_mutex);
return error;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX;
if (sbinfo->max_blocks) {
buf->f_blocks = sbinfo->max_blocks;
buf->f_bavail =
buf->f_bfree = sbinfo->max_blocks -
percpu_counter_sum(&sbinfo->used_blocks);
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes;
}
/* else leave those fields 0 like simple_statfs */
return 0;
}
/*
* File creation. Allocate an inode, and we're done..
*/
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOSPC;
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
&dentry->d_name,
shmem_initxattrs, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
return error;
}
}
#ifdef CONFIG_TMPFS_POSIX_ACL
error = generic_acl_init(inode, dir);
if (error) {
iput(inode);
return error;
}
#else
error = 0;
#endif
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
}
return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error;
if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
return error;
inc_nlink(dir);
return 0;
}
static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
* Link a file..
*/
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
int ret;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
*/
ret = shmem_reserve_inode(inode->i_sb);
if (ret)
goto out;
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
inc_nlink(inode);
ihold(inode); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
out:
return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
shmem_free_inode(inode->i_sb);
dir->i_size -= BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
if (!simple_empty(dentry))
return -ENOTEMPTY;
drop_nlink(dentry->d_inode);
drop_nlink(dir);
return shmem_unlink(dir, dentry);
}
/*
* The VFS layer already does all the dentry stuff for rename,
* we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
* gets overwritten.
*/
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *inode = old_dentry->d_inode;
int they_are_dirs = S_ISDIR(inode->i_mode);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
if (new_dentry->d_inode) {
(void) shmem_unlink(new_dir, new_dentry);
if (they_are_dirs)
drop_nlink(old_dir);
} else if (they_are_dirs) {
drop_nlink(old_dir);
inc_nlink(new_dir);
}
old_dir->i_size -= BOGO_DIRENT_SIZE;
new_dir->i_size += BOGO_DIRENT_SIZE;
old_dir->i_ctime = old_dir->i_mtime =
new_dir->i_ctime = new_dir->i_mtime =
inode->i_ctime = CURRENT_TIME;
return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
int error;
int len;
struct inode *inode;
struct page *page;
char *kaddr;
struct shmem_inode_info *info;
len = strlen(symname) + 1;
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
if (!inode)
return -ENOSPC;
error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
return error;
}
error = 0;
}
info = SHMEM_I(inode);
inode->i_size = len-1;
if (len <= SHORT_SYMLINK_LEN) {
info->symlink = kmemdup(symname, len, GFP_KERNEL);
if (!info->symlink) {
iput(inode);
return -ENOMEM;
}
inode->i_op = &shmem_short_symlink_operations;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
iput(inode);
return error;
}
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr);
SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry);
return 0;
}
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
if (page)
unlock_page(page);
return page;
}
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
if (!IS_ERR(nd_get_link(nd))) {
struct page *page = cookie;
kunmap(page);
mark_page_accessed(page);
page_cache_release(page);
}
}
#ifdef CONFIG_TMPFS_XATTR
/*
* Superblocks without xattr inode operations may get some security.* xattr
* support from the LSM "for free". As soon as we have any other xattrs
* like ACLs, we also need to implement the security.* handlers at
* filesystem level, though.
*/
/*
* Callback for security_inode_init_security() for acquiring xattrs.
*/
static int shmem_initxattrs(struct inode *inode,
const struct xattr *xattr_array,
void *fs_info)
{
struct shmem_inode_info *info = SHMEM_I(inode);
const struct xattr *xattr;
struct simple_xattr *new_xattr;
size_t len;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
if (!new_xattr)
return -ENOMEM;
len = strlen(xattr->name) + 1;
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
GFP_KERNEL);
if (!new_xattr->name) {
kfree(new_xattr);
return -ENOMEM;
}
memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
xattr->name, len);
simple_xattr_list_add(&info->xattrs, new_xattr);
}
return 0;
}
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
&generic_acl_access_handler,
&generic_acl_default_handler,
#endif
NULL
};
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
int i;
for (i = 0; i < ARRAY_SIZE(arr); i++) {
size_t preflen = arr[i].len;
if (strncmp(name, arr[i].prefix, preflen) == 0) {
if (!name[preflen])
return -EINVAL;
return 0;
}
}
return -EOPNOTSUPP;
}
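/*
 * Examples: "security.selinux" and "trusted.md5sum" validate; a bare
 * prefix such as "security." is -EINVAL; any other namespace, e.g.
 * "user.comment", falls through to -EOPNOTSUPP.
 */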
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_getxattr(dentry, name, buffer, size);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_get(&info->xattrs, name, buffer, size);
}
static int shmem_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_remove(&info->xattrs, name);
}
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */
static const struct inode_operations shmem_short_symlink_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static const struct inode_operations shmem_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link,
.put_link = shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static struct dentry *shmem_get_parent(struct dentry *child)
{
return ERR_PTR(-ESTALE);
}
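/*
 * File handle layout used by the export ops below: fh[0] holds
 * i_generation, fh[1] the low 32 bits of i_ino, fh[2] the high 32 bits.
 */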
static int shmem_match(struct inode *ino, void *vfh)
{
__u32 *fh = vfh;
__u64 inum = fh[2];
inum = (inum << 32) | fh[1];
return ino->i_ino == inum && fh[0] == ino->i_generation;
}
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct inode *inode;
struct dentry *dentry = NULL;
u64 inum;
if (fh_len < 3)
return NULL;
inum = fid->raw[2];
inum = (inum << 32) | fid->raw[1];
inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
shmem_match, fid->raw);
if (inode) {
dentry = d_find_alias(inode);
iput(inode);
}
return dentry;
}
static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
struct inode *parent)
{
if (*len < 3) {
*len = 3;
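		/* 255 tells exportfs the handle could not be encoded */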
return 255;
}
if (inode_unhashed(inode)) {
/* Unfortunately insert_inode_hash is not idempotent,
* so as we hash inodes here rather than at creation
* time, we need a lock to ensure we only try
* to do it once
*/
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
if (inode_unhashed(inode))
__insert_inode_hash(inode,
inode->i_ino + inode->i_generation);
spin_unlock(&lock);
}
fh[0] = inode->i_generation;
fh[1] = inode->i_ino;
fh[2] = ((__u64)inode->i_ino) >> 32;
*len = 3;
return 1;
}
static const struct export_operations shmem_export_ops = {
.get_parent = shmem_get_parent,
.encode_fh = shmem_encode_fh,
.fh_to_dentry = shmem_fh_to_dentry,
};
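/*
 * Illustrative mount usage (not part of this file):
 *
 *	mount -t tmpfs -o size=512m,nr_inodes=10k,mode=1777 tmpfs /mnt
 *	mount -t tmpfs -o size=50%,mpol=interleave:0-3 tmpfs /mnt
 *
 * "size" accepts k/m/g suffixes via memparse(), or a percentage of
 * total RAM; mpol's nodelist may itself contain commas, which is why
 * the parser below only splits on a comma not followed by a digit.
 */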
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
bool remount)
{
char *this_char, *value, *rest;
uid_t uid;
gid_t gid;
while (options != NULL) {
this_char = options;
for (;;) {
/*
* NUL-terminate this option: unfortunately,
* mount options form a comma-separated list,
* but mpol's nodelist may also contain commas.
*/
options = strchr(options, ',');
if (options == NULL)
break;
options++;
if (!isdigit(*options)) {
options[-1] = '\0';
break;
}
}
if (!*this_char)
continue;
if ((value = strchr(this_char,'=')) != NULL) {
*value++ = 0;
} else {
printk(KERN_ERR
"tmpfs: No value for mount option '%s'\n",
this_char);
return 1;
}
if (!strcmp(this_char,"size")) {
unsigned long long size;
size = memparse(value,&rest);
if (*rest == '%') {
size <<= PAGE_SHIFT;
size *= totalram_pages;
do_div(size, 100);
rest++;
}
if (*rest)
goto bad_val;
sbinfo->max_blocks =
DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
} else if (!strcmp(this_char,"nr_blocks")) {
sbinfo->max_blocks = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"nr_inodes")) {
sbinfo->max_inodes = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mode")) {
if (remount)
continue;
sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"uid")) {
if (remount)
continue;
uid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
sbinfo->uid = make_kuid(current_user_ns(), uid);
if (!uid_valid(sbinfo->uid))
goto bad_val;
} else if (!strcmp(this_char,"gid")) {
if (remount)
continue;
gid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
sbinfo->gid = make_kgid(current_user_ns(), gid);
if (!gid_valid(sbinfo->gid))
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
if (mpol_parse_str(value, &sbinfo->mpol))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
this_char);
return 1;
}
}
return 0;
bad_val:
printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
value, this_char);
return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
struct shmem_sb_info config = *sbinfo;
unsigned long inodes;
int error = -EINVAL;
if (shmem_parse_options(data, &config, true))
return error;
spin_lock(&sbinfo->stat_lock);
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
goto out;
if (config.max_inodes < inodes)
goto out;
/*
* Those tests disallow limited->unlimited while any are in use;
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
if (config.max_blocks && !sbinfo->max_blocks)
goto out;
if (config.max_inodes && !sbinfo->max_inodes)
goto out;
error = 0;
sbinfo->max_blocks = config.max_blocks;
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
mpol_put(sbinfo->mpol);
sbinfo->mpol = config.mpol; /* transfers initial ref */
out:
spin_unlock(&sbinfo->stat_lock);
return error;
}
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
if (sbinfo->max_blocks != shmem_default_max_blocks())
seq_printf(seq, ",size=%luk",
sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
if (sbinfo->max_inodes != shmem_default_max_inodes())
seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
seq_printf(seq, ",mode=%03ho", sbinfo->mode);
if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
seq_printf(seq, ",uid=%u",
from_kuid_munged(&init_user_ns, sbinfo->uid));
if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
seq_printf(seq, ",gid=%u",
from_kgid_munged(&init_user_ns, sbinfo->gid));
shmem_show_mpol(seq, sbinfo->mpol);
return 0;
}
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
percpu_counter_destroy(&sbinfo->used_blocks);
kfree(sbinfo);
sb->s_fs_info = NULL;
}
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct shmem_sb_info *sbinfo;
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
sbinfo->mode = S_IRWXUGO | S_ISVTX;
sbinfo->uid = current_fsuid();
sbinfo->gid = current_fsgid();
sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
/*
	 * By default we only allow half of the physical RAM per
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
if (!(sb->s_flags & MS_NOUSER)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
err = -EINVAL;
goto failed;
}
}
sb->s_export_op = &shmem_export_ops;
sb->s_flags |= MS_NOSEC;
#else
sb->s_flags |= MS_NOUSER;
#endif
spin_lock_init(&sbinfo->stat_lock);
if (percpu_counter_init(&sbinfo->used_blocks, 0))
goto failed;
sbinfo->free_inodes = sbinfo->max_inodes;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)
goto failed;
inode->i_uid = sbinfo->uid;
inode->i_gid = sbinfo->gid;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto failed;
return 0;
failed:
shmem_put_super(sb);
return err;
}
static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
struct shmem_inode_info *info;
info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
if (!info)
return NULL;
return &info->vfs_inode;
}
static void shmem_destroy_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void shmem_destroy_inode(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
call_rcu(&inode->i_rcu, shmem_destroy_callback);
}
static void shmem_init_inode(void *foo)
{
struct shmem_inode_info *info = foo;
inode_init_once(&info->vfs_inode);
}
static int shmem_init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
0, SLAB_PANIC, shmem_init_inode);
return 0;
}
static void shmem_destroy_inodecache(void)
{
kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
.error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
.llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
.aio_write = generic_file_aio_write,
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
.fallocate = shmem_fallocate,
#endif
};
static const struct inode_operations shmem_inode_operations = {
.setattr = shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
.create = shmem_create,
.lookup = simple_lookup,
.link = shmem_link,
.unlink = shmem_unlink,
.symlink = shmem_symlink,
.mkdir = shmem_mkdir,
.rmdir = shmem_rmdir,
.mknod = shmem_mknod,
.rename = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
#endif
};
static const struct super_operations shmem_ops = {
.alloc_inode = shmem_alloc_inode,
.destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
.statfs = shmem_statfs,
.remount_fs = shmem_remount_fs,
.show_options = shmem_show_options,
#endif
.evict_inode = shmem_evict_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
.remap_pages = generic_file_remap_pages,
};
static struct dentry *shmem_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, shmem_fill_super);
}
static struct file_system_type shmem_fs_type = {
.owner = THIS_MODULE,
.name = "tmpfs",
.mount = shmem_mount,
.kill_sb = kill_litter_super,
};
int __init shmem_init(void)
{
int error;
error = bdi_init(&shmem_backing_dev_info);
if (error)
goto out4;
error = shmem_init_inodecache();
if (error)
goto out3;
error = register_filesystem(&shmem_fs_type);
if (error) {
printk(KERN_ERR "Could not register tmpfs\n");
goto out2;
}
shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
shmem_fs_type.name, NULL);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
goto out1;
}
return 0;
out1:
unregister_filesystem(&shmem_fs_type);
out2:
shmem_destroy_inodecache();
out3:
bdi_destroy(&shmem_backing_dev_info);
out4:
shm_mnt = ERR_PTR(error);
return error;
}
#else /* !CONFIG_SHMEM */
/*
* tiny-shmem: simple shmemfs and tmpfs using ramfs code
*
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * its complexity. On systems without swap this code should be
* effectively equivalent, but much lighter weight.
*/
#include <linux/ramfs.h>
static struct file_system_type shmem_fs_type = {
.name = "tmpfs",
.mount = ramfs_mount,
.kill_sb = kill_litter_super,
};
int __init shmem_init(void)
{
BUG_ON(register_filesystem(&shmem_fs_type) != 0);
shm_mnt = kern_mount(&shmem_fs_type);
BUG_ON(IS_ERR(shm_mnt));
return 0;
}
int shmem_unuse(swp_entry_t swap, struct page *page)
{
return 0;
}
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
return 0;
}
void shmem_unlock_mapping(struct address_space *mapping)
{
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
#endif /* CONFIG_SHMEM */
/* common code */
/**
* shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
int error;
struct file *file;
struct inode *inode;
struct path path;
struct dentry *root;
struct qstr this;
if (IS_ERR(shm_mnt))
return (void *)shm_mnt;
if (size < 0 || size > MAX_LFS_FILESIZE)
return ERR_PTR(-EINVAL);
if (shmem_acct_size(flags, size))
return ERR_PTR(-ENOMEM);
error = -ENOMEM;
this.name = name;
this.len = strlen(name);
this.hash = 0; /* will go */
root = shm_mnt->mnt_root;
path.dentry = d_alloc(root, &this);
if (!path.dentry)
goto put_memory;
path.mnt = mntget(shm_mnt);
error = -ENOSPC;
inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
goto put_dentry;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
if (error)
goto put_dentry;
#endif
error = -ENFILE;
file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&shmem_file_operations);
if (!file)
goto put_dentry;
return file;
put_dentry:
path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
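/*
 * Illustrative in-kernel usage (assumed caller, not from this file):
 *
 *	struct file *file = shmem_file_setup("dev/foo", size, VM_NORESERVE);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */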
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup(struct vm_area_struct *vma)
{
struct file *file;
loff_t size = vma->vm_end - vma->vm_start;
file = shmem_file_setup("dev/zero", size, vma->vm_flags);
if (IS_ERR(file))
return PTR_ERR(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;
vma->vm_ops = &shmem_vm_ops;
return 0;
}
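/*
 * Note: shmem_zero_setup() is what backs MAP_SHARED | MAP_ANONYMOUS
 * mappings with tmpfs: mmap_region() calls it when a shared mapping
 * has no file of its own.
 */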
/**
* shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
* @mapping: the page's address_space
* @index: the page index
* @gfp: the page allocator flags to use if allocating
*
* This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
* with any new page allocations done using the specified allocation flags.
* But read_cache_page_gfp() uses the ->readpage() method: which does not
* suit tmpfs, since it may have pages in swapcache, and needs to find those
* for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
*
* i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
* with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
*/
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
struct inode *inode = mapping->host;
struct page *page;
int error;
BUG_ON(mapping->a_ops != &shmem_aops);
error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
if (error)
page = ERR_PTR(error);
else
unlock_page(page);
return page;
#else
/*
* The tiny !SHMEM case uses ramfs without swap
*/
return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5587_0 |
crossvul-cpp_data_bad_4818_0 |
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src)
{
memcpy(dst, src, sizeof(struct ceph_crypto_key));
dst->key = kmemdup(src->key, src->len, GFP_NOFS);
if (!dst->key)
return -ENOMEM;
return 0;
}
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
sizeof(u16) + key->len > end)
return -ERANGE;
ceph_encode_16(p, key->type);
ceph_encode_copy(p, &key->created, sizeof(key->created));
ceph_encode_16(p, key->len);
ceph_encode_copy(p, key->key, key->len);
return 0;
}
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
key->type = ceph_decode_16(p);
ceph_decode_copy(p, &key->created, sizeof(key->created));
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
key->key = kmalloc(key->len, GFP_NOFS);
if (!key->key)
return -ENOMEM;
ceph_decode_copy(p, key->key, key->len);
return 0;
bad:
dout("failed to decode crypto key\n");
return -EINVAL;
}
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
int inlen = strlen(inkey);
int blen = inlen * 3 / 4;
void *buf, *p;
int ret;
dout("crypto_key_unarmor %s\n", inkey);
buf = kmalloc(blen, GFP_NOFS);
if (!buf)
return -ENOMEM;
blen = ceph_unarmor(buf, inkey, inkey+inlen);
if (blen < 0) {
kfree(buf);
return blen;
}
p = buf;
ret = ceph_crypto_key_decode(key, &p, p + blen);
kfree(buf);
if (ret)
return ret;
dout("crypto_key_unarmor key %p type %d len %d\n", key,
key->type, key->len);
return 0;
}
static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
{
return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
/*
* Should be used for buffers allocated with ceph_kvmalloc().
* Currently these are encrypt out-buffer (ceph_buffer) and decrypt
* in-buffer (msg front).
*
* Dispose of @sgt with teardown_sgtable().
*
* @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
* in cases where a single sg is sufficient. No attempt to reduce the
* number of sgs by squeezing physically contiguous pages together is
* made though, for simplicity.
*/
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
const void *buf, unsigned int buf_len)
{
struct scatterlist *sg;
const bool is_vmalloc = is_vmalloc_addr(buf);
unsigned int off = offset_in_page(buf);
unsigned int chunk_cnt = 1;
unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
int i;
int ret;
if (buf_len == 0) {
memset(sgt, 0, sizeof(*sgt));
return -EINVAL;
}
if (is_vmalloc) {
chunk_cnt = chunk_len >> PAGE_SHIFT;
chunk_len = PAGE_SIZE;
}
if (chunk_cnt > 1) {
ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
if (ret)
return ret;
} else {
WARN_ON(chunk_cnt != 1);
sg_init_table(prealloc_sg, 1);
sgt->sgl = prealloc_sg;
sgt->nents = sgt->orig_nents = 1;
}
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
struct page *page;
unsigned int len = min(chunk_len - off, buf_len);
if (is_vmalloc)
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
sg_set_page(sg, page, len, off);
off = 0;
buf += len;
buf_len -= len;
}
WARN_ON(buf_len != 0);
return 0;
}
static void teardown_sgtable(struct sg_table *sgt)
{
if (sgt->orig_nents > 1)
sg_free_table(sgt);
}
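/*
 * Hedged usage sketch (illustrative only): the setup/teardown pairing
 * the comment above prescribes, for a buffer that may be vmalloc'ed.
 * The function name is an assumption.
 */
static int __maybe_unused example_map_buf(const void *buf, unsigned int len)
{
	struct sg_table sgt;
	struct scatterlist prealloc_sg;	/* avoids allocation for a single sg */
	int ret;

	ret = setup_sgtable(&sgt, &prealloc_sg, buf, len);
	if (ret)
		return ret;
	/* ... hand sgt.sgl to the crypto/DMA API here ... */
	teardown_sgtable(&sgt);
	return 0;
}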
static int ceph_aes_encrypt(const void *key, int key_len,
void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
struct scatterlist sg_in[2], prealloc_sg;
struct sg_table sg_out;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
char iv[AES_BLOCK_SIZE];
size_t zero_padding = (0x10 - (src_len & 0x0f));
char pad[16];
if (IS_ERR(tfm))
return PTR_ERR(tfm);
memset(pad, zero_padding, zero_padding);
*dst_len = src_len + zero_padding;
sg_init_table(sg_in, 2);
sg_set_buf(&sg_in[0], src, src_len);
sg_set_buf(&sg_in[1], pad, zero_padding);
ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
src_len + zero_padding, iv);
/*
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
pad, zero_padding, 1);
*/
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_crypt failed %d\n", ret);
goto out_sg;
}
/*
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_out);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
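/*
 * Worked example (illustrative): the padding above is PKCS#7-style.
 * For src_len = 29, zero_padding = 0x10 - (29 & 0x0f) = 3, so three
 * 0x03 bytes are appended and *dst_len = 32.  When src_len is already
 * a multiple of 16, a full extra block of 16 bytes of 0x10 is added,
 * which is why callers must size dst for src_len + 16 in the worst
 * case.
 */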
static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len)
{
struct scatterlist sg_in[3], prealloc_sg;
struct sg_table sg_out;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
char iv[AES_BLOCK_SIZE];
size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
char pad[16];
if (IS_ERR(tfm))
return PTR_ERR(tfm);
memset(pad, zero_padding, zero_padding);
*dst_len = src1_len + src2_len + zero_padding;
sg_init_table(sg_in, 3);
sg_set_buf(&sg_in[0], src1, src1_len);
sg_set_buf(&sg_in[1], src2, src2_len);
sg_set_buf(&sg_in[2], pad, zero_padding);
ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
src1_len + src2_len + zero_padding, iv);
/*
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
src1, src1_len, 1);
print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
src2, src2_len, 1);
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
pad, zero_padding, 1);
*/
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_crypt2 failed %d\n", ret);
goto out_sg;
}
/*
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_out);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
static int ceph_aes_decrypt(const void *key, int key_len,
void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
struct sg_table sg_in;
struct scatterlist sg_out[2], prealloc_sg;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
char pad[16];
char iv[AES_BLOCK_SIZE];
int ret;
int last_byte;
if (IS_ERR(tfm))
return PTR_ERR(tfm);
sg_init_table(sg_out, 2);
sg_set_buf(&sg_out[0], dst, *dst_len);
sg_set_buf(&sg_out[1], pad, sizeof(pad));
ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
src_len, iv);
/*
print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
*/
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_decrypt failed %d\n", ret);
goto out_sg;
}
if (src_len <= *dst_len)
last_byte = ((char *)dst)[src_len - 1];
else
last_byte = pad[src_len - *dst_len - 1];
if (last_byte <= 16 && src_len >= last_byte) {
*dst_len = src_len - last_byte;
} else {
pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
last_byte, (int)src_len);
return -EPERM; /* bad padding */
}
/*
print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_in);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
static int ceph_aes_decrypt2(const void *key, int key_len,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
struct sg_table sg_in;
struct scatterlist sg_out[3], prealloc_sg;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
char pad[16];
char iv[AES_BLOCK_SIZE];
int ret;
int last_byte;
if (IS_ERR(tfm))
return PTR_ERR(tfm);
sg_init_table(sg_out, 3);
sg_set_buf(&sg_out[0], dst1, *dst1_len);
sg_set_buf(&sg_out[1], dst2, *dst2_len);
sg_set_buf(&sg_out[2], pad, sizeof(pad));
ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
src_len, iv);
/*
print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
*/
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_decrypt failed %d\n", ret);
goto out_sg;
}
if (src_len <= *dst1_len)
last_byte = ((char *)dst1)[src_len - 1];
else if (src_len <= *dst1_len + *dst2_len)
last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
else
last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
if (last_byte <= 16 && src_len >= last_byte) {
src_len -= last_byte;
} else {
pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
last_byte, (int)src_len);
return -EPERM; /* bad padding */
}
if (src_len < *dst1_len) {
*dst1_len = src_len;
*dst2_len = 0;
} else {
*dst2_len = src_len - *dst1_len;
}
/*
print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
dst1, *dst1_len, 1);
print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
dst2, *dst2_len, 1);
*/
out_sg:
teardown_sgtable(&sg_in);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src_len)
return -ERANGE;
memcpy(dst, src, src_len);
*dst_len = src_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt(secret->key, secret->len, dst,
dst_len, src, src_len);
default:
return -EINVAL;
}
}
int ceph_decrypt2(struct ceph_crypto_key *secret,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
size_t t;
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst1_len + *dst2_len < src_len)
return -ERANGE;
t = min(*dst1_len, src_len);
memcpy(dst1, src, t);
*dst1_len = t;
src += t;
src_len -= t;
if (src_len) {
t = min(*dst2_len, src_len);
memcpy(dst2, src, t);
*dst2_len = t;
}
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt2(secret->key, secret->len,
dst1, dst1_len, dst2, dst2_len,
src, src_len);
default:
return -EINVAL;
}
}
int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src_len)
return -ERANGE;
memcpy(dst, src, src_len);
*dst_len = src_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_encrypt(secret->key, secret->len, dst,
dst_len, src, src_len);
default:
return -EINVAL;
}
}
int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src1_len + src2_len)
return -ERANGE;
memcpy(dst, src1, src1_len);
memcpy(dst + src1_len, src2, src2_len);
*dst_len = src1_len + src2_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
src1, src1_len, src2, src2_len);
default:
return -EINVAL;
}
}
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
size_t datalen = prep->datalen;
int ret;
void *p;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto err;
ret = -ENOMEM;
ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
if (!ckey)
goto err;
/* TODO ceph_crypto_key_decode should really take const input */
p = (void *)prep->data;
ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
if (ret < 0)
goto err_ckey;
prep->payload.data[0] = ckey;
prep->quotalen = datalen;
return 0;
err_ckey:
kfree(ckey);
err:
return ret;
}
static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey = prep->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
static void ceph_key_destroy(struct key *key)
{
struct ceph_crypto_key *ckey = key->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
struct key_type key_type_ceph = {
.name = "ceph",
.preparse = ceph_key_preparse,
.free_preparse = ceph_key_free_preparse,
.instantiate = generic_key_instantiate,
.destroy = ceph_key_destroy,
};
int ceph_crypto_init(void) {
return register_key_type(&key_type_ceph);
}
void ceph_crypto_shutdown(void) {
unregister_key_type(&key_type_ceph);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4818_0 |
crossvul-cpp_data_good_4818_0 |
#include <linux/ceph/ceph_debug.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/aes.h>
#include <crypto/skcipher.h>
#include <linux/key-type.h>
#include <keys/ceph-type.h>
#include <keys/user-type.h>
#include <linux/ceph/decode.h>
#include "crypto.h"
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src)
{
memcpy(dst, src, sizeof(struct ceph_crypto_key));
dst->key = kmemdup(src->key, src->len, GFP_NOFS);
if (!dst->key)
return -ENOMEM;
return 0;
}
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
sizeof(u16) + key->len > end)
return -ERANGE;
ceph_encode_16(p, key->type);
ceph_encode_copy(p, &key->created, sizeof(key->created));
ceph_encode_16(p, key->len);
ceph_encode_copy(p, key->key, key->len);
return 0;
}
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
key->type = ceph_decode_16(p);
ceph_decode_copy(p, &key->created, sizeof(key->created));
key->len = ceph_decode_16(p);
ceph_decode_need(p, end, key->len, bad);
key->key = kmalloc(key->len, GFP_NOFS);
if (!key->key)
return -ENOMEM;
ceph_decode_copy(p, key->key, key->len);
return 0;
bad:
dout("failed to decode crypto key\n");
return -EINVAL;
}
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
{
int inlen = strlen(inkey);
int blen = inlen * 3 / 4;
void *buf, *p;
int ret;
dout("crypto_key_unarmor %s\n", inkey);
buf = kmalloc(blen, GFP_NOFS);
if (!buf)
return -ENOMEM;
blen = ceph_unarmor(buf, inkey, inkey+inlen);
if (blen < 0) {
kfree(buf);
return blen;
}
p = buf;
ret = ceph_crypto_key_decode(key, &p, p + blen);
kfree(buf);
if (ret)
return ret;
dout("crypto_key_unarmor key %p type %d len %d\n", key,
key->type, key->len);
return 0;
}
static struct crypto_skcipher *ceph_crypto_alloc_cipher(void)
{
return crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}
static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
/*
* Should be used for buffers allocated with ceph_kvmalloc().
* Currently these are encrypt out-buffer (ceph_buffer) and decrypt
* in-buffer (msg front).
*
* Dispose of @sgt with teardown_sgtable().
*
* @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
* in cases where a single sg is sufficient. No attempt to reduce the
* number of sgs by squeezing physically contiguous pages together is
* made though, for simplicity.
*/
static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
const void *buf, unsigned int buf_len)
{
struct scatterlist *sg;
const bool is_vmalloc = is_vmalloc_addr(buf);
unsigned int off = offset_in_page(buf);
unsigned int chunk_cnt = 1;
unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
int i;
int ret;
if (buf_len == 0) {
memset(sgt, 0, sizeof(*sgt));
return -EINVAL;
}
if (is_vmalloc) {
chunk_cnt = chunk_len >> PAGE_SHIFT;
chunk_len = PAGE_SIZE;
}
if (chunk_cnt > 1) {
ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
if (ret)
return ret;
} else {
WARN_ON(chunk_cnt != 1);
sg_init_table(prealloc_sg, 1);
sgt->sgl = prealloc_sg;
sgt->nents = sgt->orig_nents = 1;
}
for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
struct page *page;
unsigned int len = min(chunk_len - off, buf_len);
if (is_vmalloc)
page = vmalloc_to_page(buf);
else
page = virt_to_page(buf);
sg_set_page(sg, page, len, off);
off = 0;
buf += len;
buf_len -= len;
}
WARN_ON(buf_len != 0);
return 0;
}
static void teardown_sgtable(struct sg_table *sgt)
{
if (sgt->orig_nents > 1)
sg_free_table(sgt);
}
static int ceph_aes_encrypt(const void *key, int key_len,
void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
struct scatterlist sg_in[2], prealloc_sg;
struct sg_table sg_out;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
char iv[AES_BLOCK_SIZE];
size_t zero_padding = (0x10 - (src_len & 0x0f));
char pad[16];
if (IS_ERR(tfm))
return PTR_ERR(tfm);
memset(pad, zero_padding, zero_padding);
*dst_len = src_len + zero_padding;
sg_init_table(sg_in, 2);
sg_set_buf(&sg_in[0], src, src_len);
sg_set_buf(&sg_in[1], pad, zero_padding);
ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
src_len + zero_padding, iv);
/*
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
pad, zero_padding, 1);
*/
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_crypt failed %d\n", ret);
goto out_sg;
}
/*
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_out);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len)
{
struct scatterlist sg_in[3], prealloc_sg;
struct sg_table sg_out;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
int ret;
char iv[AES_BLOCK_SIZE];
size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
char pad[16];
if (IS_ERR(tfm))
return PTR_ERR(tfm);
memset(pad, zero_padding, zero_padding);
*dst_len = src1_len + src2_len + zero_padding;
sg_init_table(sg_in, 3);
sg_set_buf(&sg_in[0], src1, src1_len);
sg_set_buf(&sg_in[1], src2, src2_len);
sg_set_buf(&sg_in[2], pad, zero_padding);
ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in, sg_out.sgl,
src1_len + src2_len + zero_padding, iv);
/*
print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
src1, src1_len, 1);
print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
src2, src2_len, 1);
print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
pad, zero_padding, 1);
*/
ret = crypto_skcipher_encrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_crypt2 failed %d\n", ret);
goto out_sg;
}
/*
print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_out);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
static int ceph_aes_decrypt(const void *key, int key_len,
void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
struct sg_table sg_in;
struct scatterlist sg_out[2], prealloc_sg;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
char pad[16];
char iv[AES_BLOCK_SIZE];
int ret;
int last_byte;
if (IS_ERR(tfm))
return PTR_ERR(tfm);
sg_init_table(sg_out, 2);
sg_set_buf(&sg_out[0], dst, *dst_len);
sg_set_buf(&sg_out[1], pad, sizeof(pad));
ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
src_len, iv);
/*
print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
*/
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_decrypt failed %d\n", ret);
goto out_sg;
}
if (src_len <= *dst_len)
last_byte = ((char *)dst)[src_len - 1];
else
last_byte = pad[src_len - *dst_len - 1];
if (last_byte <= 16 && src_len >= last_byte) {
*dst_len = src_len - last_byte;
} else {
pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
last_byte, (int)src_len);
return -EPERM; /* bad padding */
}
/*
print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
dst, *dst_len, 1);
*/
out_sg:
teardown_sgtable(&sg_in);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
static int ceph_aes_decrypt2(const void *key, int key_len,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
struct sg_table sg_in;
struct scatterlist sg_out[3], prealloc_sg;
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
char pad[16];
char iv[AES_BLOCK_SIZE];
int ret;
int last_byte;
if (IS_ERR(tfm))
return PTR_ERR(tfm);
sg_init_table(sg_out, 3);
sg_set_buf(&sg_out[0], dst1, *dst1_len);
sg_set_buf(&sg_out[1], dst2, *dst2_len);
sg_set_buf(&sg_out[2], pad, sizeof(pad));
ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key, key_len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sg_in.sgl, sg_out,
src_len, iv);
/*
print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
key, key_len, 1);
print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
src, src_len, 1);
*/
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret < 0) {
pr_err("ceph_aes_decrypt failed %d\n", ret);
goto out_sg;
}
if (src_len <= *dst1_len)
last_byte = ((char *)dst1)[src_len - 1];
else if (src_len <= *dst1_len + *dst2_len)
last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
else
last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
if (last_byte <= 16 && src_len >= last_byte) {
src_len -= last_byte;
} else {
pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
last_byte, (int)src_len);
return -EPERM; /* bad padding */
}
if (src_len < *dst1_len) {
*dst1_len = src_len;
*dst2_len = 0;
} else {
*dst2_len = src_len - *dst1_len;
}
/*
print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
dst1, *dst1_len, 1);
print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
dst2, *dst2_len, 1);
*/
out_sg:
teardown_sgtable(&sg_in);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src_len)
return -ERANGE;
memcpy(dst, src, src_len);
*dst_len = src_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt(secret->key, secret->len, dst,
dst_len, src, src_len);
default:
return -EINVAL;
}
}
int ceph_decrypt2(struct ceph_crypto_key *secret,
void *dst1, size_t *dst1_len,
void *dst2, size_t *dst2_len,
const void *src, size_t src_len)
{
size_t t;
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst1_len + *dst2_len < src_len)
return -ERANGE;
t = min(*dst1_len, src_len);
memcpy(dst1, src, t);
*dst1_len = t;
src += t;
src_len -= t;
if (src_len) {
t = min(*dst2_len, src_len);
memcpy(dst2, src, t);
*dst2_len = t;
}
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_decrypt2(secret->key, secret->len,
dst1, dst1_len, dst2, dst2_len,
src, src_len);
default:
return -EINVAL;
}
}
int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src, size_t src_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src_len)
return -ERANGE;
memcpy(dst, src, src_len);
*dst_len = src_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_encrypt(secret->key, secret->len, dst,
dst_len, src, src_len);
default:
return -EINVAL;
}
}
int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
const void *src1, size_t src1_len,
const void *src2, size_t src2_len)
{
switch (secret->type) {
case CEPH_CRYPTO_NONE:
if (*dst_len < src1_len + src2_len)
return -ERANGE;
memcpy(dst, src1, src1_len);
memcpy(dst + src1_len, src2, src2_len);
*dst_len = src1_len + src2_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
src1, src1_len, src2, src2_len);
default:
return -EINVAL;
}
}
static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
struct crypto_skcipher *tfm = ceph_crypto_alloc_cipher();
SKCIPHER_REQUEST_ON_STACK(req, tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
char iv[AES_BLOCK_SIZE];
int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
int crypt_len = encrypt ? in_len + pad_byte : in_len;
int ret;
if (IS_ERR(tfm))
return PTR_ERR(tfm);
WARN_ON(crypt_len > buf_len);
if (encrypt)
memset(buf + in_len, pad_byte, pad_byte);
ret = setup_sgtable(&sgt, &prealloc_sg, buf, crypt_len);
if (ret)
goto out_tfm;
crypto_skcipher_setkey((void *)tfm, key->key, key->len);
memcpy(iv, aes_iv, AES_BLOCK_SIZE);
skcipher_request_set_tfm(req, tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
skcipher_request_set_crypt(req, sgt.sgl, sgt.sgl, crypt_len, iv);
/*
print_hex_dump(KERN_ERR, "key: ", DUMP_PREFIX_NONE, 16, 1,
key->key, key->len, 1);
print_hex_dump(KERN_ERR, " in: ", DUMP_PREFIX_NONE, 16, 1,
buf, crypt_len, 1);
*/
if (encrypt)
ret = crypto_skcipher_encrypt(req);
else
ret = crypto_skcipher_decrypt(req);
skcipher_request_zero(req);
if (ret) {
pr_err("%s %scrypt failed: %d\n", __func__,
encrypt ? "en" : "de", ret);
goto out_sgt;
}
/*
print_hex_dump(KERN_ERR, "out: ", DUMP_PREFIX_NONE, 16, 1,
buf, crypt_len, 1);
*/
if (encrypt) {
*pout_len = crypt_len;
} else {
pad_byte = *(char *)(buf + in_len - 1);
if (pad_byte > 0 && pad_byte <= AES_BLOCK_SIZE &&
in_len >= pad_byte) {
*pout_len = in_len - pad_byte;
} else {
pr_err("%s got bad padding %d on in_len %d\n",
__func__, pad_byte, in_len);
ret = -EPERM;
goto out_sgt;
}
}
out_sgt:
teardown_sgtable(&sgt);
out_tfm:
crypto_free_skcipher(tfm);
return ret;
}
int ceph_crypt(const struct ceph_crypto_key *key, bool encrypt,
void *buf, int buf_len, int in_len, int *pout_len)
{
switch (key->type) {
case CEPH_CRYPTO_NONE:
*pout_len = in_len;
return 0;
case CEPH_CRYPTO_AES:
return ceph_aes_crypt(key, encrypt, buf, buf_len, in_len,
pout_len);
default:
return -ENOTSUPP;
}
}
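/*
 * Hedged usage sketch (illustrative, not part of the original file):
 * ceph_crypt() above works in place, so the caller reserves room for
 * up to one block of padding.  The names below are assumptions.
 */
static int __maybe_unused example_encrypt_in_place(
		const struct ceph_crypto_key *key,
		void *buf, int buf_len, int msg_len)
{
	int out_len;
	int ret;

	/* caller guarantees buf_len >= msg_len + AES_BLOCK_SIZE */
	ret = ceph_crypt(key, true, buf, buf_len, msg_len, &out_len);
	if (ret)
		return ret;
	return out_len;	/* msg_len rounded up to the next AES block */
}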
static int ceph_key_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey;
size_t datalen = prep->datalen;
int ret;
void *p;
ret = -EINVAL;
if (datalen <= 0 || datalen > 32767 || !prep->data)
goto err;
ret = -ENOMEM;
ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
if (!ckey)
goto err;
/* TODO ceph_crypto_key_decode should really take const input */
p = (void *)prep->data;
ret = ceph_crypto_key_decode(ckey, &p, (char*)prep->data+datalen);
if (ret < 0)
goto err_ckey;
prep->payload.data[0] = ckey;
prep->quotalen = datalen;
return 0;
err_ckey:
kfree(ckey);
err:
return ret;
}
static void ceph_key_free_preparse(struct key_preparsed_payload *prep)
{
struct ceph_crypto_key *ckey = prep->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
static void ceph_key_destroy(struct key *key)
{
struct ceph_crypto_key *ckey = key->payload.data[0];
ceph_crypto_key_destroy(ckey);
kfree(ckey);
}
struct key_type key_type_ceph = {
.name = "ceph",
.preparse = ceph_key_preparse,
.free_preparse = ceph_key_free_preparse,
.instantiate = generic_key_instantiate,
.destroy = ceph_key_destroy,
};
int ceph_crypto_init(void) {
return register_key_type(&key_type_ceph);
}
void ceph_crypto_shutdown(void) {
unregister_key_type(&key_type_ceph);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_4818_0 |
crossvul-cpp_data_bad_1675_2 | /*
* Kernel-based Virtual Machine driver for Linux
*
* This module enables machines with Intel VT-x extensions to run virtual
* machines without emulation or binary translation.
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "irq.h"
#include "mmu.h"
#include "cpuid.h"
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/tboot.h>
#include <linux/hrtimer.h>
#include "kvm_cache_regs.h"
#include "x86.h"
#include <asm/cpu.h>
#include <asm/io.h>
#include <asm/desc.h>
#include <asm/vmx.h>
#include <asm/virtext.h>
#include <asm/mce.h>
#include <asm/fpu/internal.h>
#include <asm/perf_event.h>
#include <asm/debugreg.h>
#include <asm/kexec.h>
#include <asm/apic.h>
#include <asm/irq_remapping.h>
#include "trace.h"
#include "pmu.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
#define __ex_clear(x, reg) \
____kvm_handle_fault_on_reboot(x, "xor " reg " , " reg)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static const struct x86_cpu_id vmx_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_VMX),
{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
static bool __read_mostly enable_ept = 1;
module_param_named(ept, enable_ept, bool, S_IRUGO);
static bool __read_mostly enable_unrestricted_guest = 1;
module_param_named(unrestricted_guest,
enable_unrestricted_guest, bool, S_IRUGO);
static bool __read_mostly enable_ept_ad_bits = 1;
module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
static bool __read_mostly emulate_invalid_guest_state = true;
module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly vmm_exclusive = 1;
module_param(vmm_exclusive, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
static bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);
static bool __read_mostly enable_shadow_vmcs = 1;
module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
/*
* If nested=1, nested virtualization is supported, i.e., guests may use
 * VMX and be hypervisors for their own guests. If nested=0, guests may not
* use VMX instructions.
*/
static bool __read_mostly nested = 0;
module_param(nested, bool, S_IRUGO);
static u64 __read_mostly host_xss;
static bool __read_mostly enable_pml = 1;
module_param_named(pml, enable_pml, bool, S_IRUGO);
#define KVM_VMX_TSC_MULTIPLIER_MAX 0xffffffffffffffffULL
#define KVM_GUEST_CR0_MASK (X86_CR0_NW | X86_CR0_CD)
#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST (X86_CR0_WP | X86_CR0_NE)
#define KVM_VM_CR0_ALWAYS_ON \
(KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
#define KVM_CR4_GUEST_OWNED_BITS \
(X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR \
| X86_CR4_OSXMMEXCPT | X86_CR4_TSD)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
#define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
/*
 * These two parameters are used to configure the controls for Pause-Loop
 * Exiting:
 * ple_gap: upper bound on the amount of time between two successive
 * executions of PAUSE in a loop. Also indicates whether PLE is enabled.
 * According to tests, this time is usually smaller than 128 cycles.
 * ple_window: upper bound on the amount of time a guest is allowed to execute
 * in a PAUSE loop. Tests indicate that most spinlocks are held for
 * less than 2^12 cycles.
 * Time is measured based on a counter that runs at the same rate as the TSC;
 * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
*/
#define KVM_VMX_DEFAULT_PLE_GAP 128
#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
#define KVM_VMX_DEFAULT_PLE_WINDOW_GROW 2
#define KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK 0
#define KVM_VMX_DEFAULT_PLE_WINDOW_MAX \
INT_MAX / KVM_VMX_DEFAULT_PLE_WINDOW_GROW
static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
module_param(ple_gap, int, S_IRUGO);
static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
module_param(ple_window, int, S_IRUGO);
/* Default doubles per-vcpu window every exit. */
static int ple_window_grow = KVM_VMX_DEFAULT_PLE_WINDOW_GROW;
module_param(ple_window_grow, int, S_IRUGO);
/* Default resets per-vcpu window every exit to ple_window. */
static int ple_window_shrink = KVM_VMX_DEFAULT_PLE_WINDOW_SHRINK;
module_param(ple_window_shrink, int, S_IRUGO);
/* Default is to compute the maximum so we can never overflow. */
static int ple_window_actual_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
static int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, int, S_IRUGO);
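/*
 * Worked example (illustrative): with the defaults above, a vcpu that
 * keeps triggering PLE exits sees its window double on each exit,
 * 4096 -> 8192 -> 16384 -> ..., capped at INT_MAX / 2 so that one more
 * doubling cannot overflow an int.  ple_window_shrink == 0 means the
 * window is reset to ple_window instead of being shrunk gradually.
 */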
extern const ulong vmx_return;
#define NR_AUTOLOAD_MSRS 8
#define VMCS02_POOL_SIZE 1
struct vmcs {
u32 revision_id;
u32 abort;
char data[0];
};
/*
* Track a VMCS that may be loaded on a certain CPU. If it is (cpu!=-1), also
* remember whether it was VMLAUNCHed, and maintain a linked list of all VMCSs
* loaded on this CPU (so we can clear them if the CPU goes down).
*/
struct loaded_vmcs {
struct vmcs *vmcs;
int cpu;
int launched;
struct list_head loaded_vmcss_on_cpu_link;
};
struct shared_msr_entry {
unsigned index;
u64 data;
u64 mask;
};
/*
* struct vmcs12 describes the state that our guest hypervisor (L1) keeps for a
* single nested guest (L2), hence the name vmcs12. Any VMX implementation has
* a VMCS structure, and vmcs12 is our emulated VMX's VMCS. This structure is
* stored in guest memory specified by VMPTRLD, but is opaque to the guest,
* which must access it using VMREAD/VMWRITE/VMCLEAR instructions.
* More than one of these structures may exist, if L1 runs multiple L2 guests.
* nested_vmx_run() will use the data here to build a vmcs02: a VMCS for the
* underlying hardware which will be used to run L2.
* This structure is packed to ensure that its layout is identical across
* machines (necessary for live migration).
* If there are changes in this struct, VMCS12_REVISION must be changed.
*/
typedef u64 natural_width;
struct __packed vmcs12 {
/* According to the Intel spec, a VMCS region must start with the
* following two fields. Then follow implementation-specific data.
*/
u32 revision_id;
u32 abort;
u32 launch_state; /* set to 0 by VMCLEAR, to 1 by VMLAUNCH */
u32 padding[7]; /* room for future expansion */
u64 io_bitmap_a;
u64 io_bitmap_b;
u64 msr_bitmap;
u64 vm_exit_msr_store_addr;
u64 vm_exit_msr_load_addr;
u64 vm_entry_msr_load_addr;
u64 tsc_offset;
u64 virtual_apic_page_addr;
u64 apic_access_addr;
u64 posted_intr_desc_addr;
u64 ept_pointer;
u64 eoi_exit_bitmap0;
u64 eoi_exit_bitmap1;
u64 eoi_exit_bitmap2;
u64 eoi_exit_bitmap3;
u64 xss_exit_bitmap;
u64 guest_physical_address;
u64 vmcs_link_pointer;
u64 guest_ia32_debugctl;
u64 guest_ia32_pat;
u64 guest_ia32_efer;
u64 guest_ia32_perf_global_ctrl;
u64 guest_pdptr0;
u64 guest_pdptr1;
u64 guest_pdptr2;
u64 guest_pdptr3;
u64 guest_bndcfgs;
u64 host_ia32_pat;
u64 host_ia32_efer;
u64 host_ia32_perf_global_ctrl;
u64 padding64[8]; /* room for future expansion */
/*
* To allow migration of L1 (complete with its L2 guests) between
* machines of different natural widths (32 or 64 bit), we cannot have
 * unsigned long fields with no explicit size. We use u64 (aliased
* natural_width) instead. Luckily, x86 is little-endian.
*/
natural_width cr0_guest_host_mask;
natural_width cr4_guest_host_mask;
natural_width cr0_read_shadow;
natural_width cr4_read_shadow;
natural_width cr3_target_value0;
natural_width cr3_target_value1;
natural_width cr3_target_value2;
natural_width cr3_target_value3;
natural_width exit_qualification;
natural_width guest_linear_address;
natural_width guest_cr0;
natural_width guest_cr3;
natural_width guest_cr4;
natural_width guest_es_base;
natural_width guest_cs_base;
natural_width guest_ss_base;
natural_width guest_ds_base;
natural_width guest_fs_base;
natural_width guest_gs_base;
natural_width guest_ldtr_base;
natural_width guest_tr_base;
natural_width guest_gdtr_base;
natural_width guest_idtr_base;
natural_width guest_dr7;
natural_width guest_rsp;
natural_width guest_rip;
natural_width guest_rflags;
natural_width guest_pending_dbg_exceptions;
natural_width guest_sysenter_esp;
natural_width guest_sysenter_eip;
natural_width host_cr0;
natural_width host_cr3;
natural_width host_cr4;
natural_width host_fs_base;
natural_width host_gs_base;
natural_width host_tr_base;
natural_width host_gdtr_base;
natural_width host_idtr_base;
natural_width host_ia32_sysenter_esp;
natural_width host_ia32_sysenter_eip;
natural_width host_rsp;
natural_width host_rip;
natural_width paddingl[8]; /* room for future expansion */
u32 pin_based_vm_exec_control;
u32 cpu_based_vm_exec_control;
u32 exception_bitmap;
u32 page_fault_error_code_mask;
u32 page_fault_error_code_match;
u32 cr3_target_count;
u32 vm_exit_controls;
u32 vm_exit_msr_store_count;
u32 vm_exit_msr_load_count;
u32 vm_entry_controls;
u32 vm_entry_msr_load_count;
u32 vm_entry_intr_info_field;
u32 vm_entry_exception_error_code;
u32 vm_entry_instruction_len;
u32 tpr_threshold;
u32 secondary_vm_exec_control;
u32 vm_instruction_error;
u32 vm_exit_reason;
u32 vm_exit_intr_info;
u32 vm_exit_intr_error_code;
u32 idt_vectoring_info_field;
u32 idt_vectoring_error_code;
u32 vm_exit_instruction_len;
u32 vmx_instruction_info;
u32 guest_es_limit;
u32 guest_cs_limit;
u32 guest_ss_limit;
u32 guest_ds_limit;
u32 guest_fs_limit;
u32 guest_gs_limit;
u32 guest_ldtr_limit;
u32 guest_tr_limit;
u32 guest_gdtr_limit;
u32 guest_idtr_limit;
u32 guest_es_ar_bytes;
u32 guest_cs_ar_bytes;
u32 guest_ss_ar_bytes;
u32 guest_ds_ar_bytes;
u32 guest_fs_ar_bytes;
u32 guest_gs_ar_bytes;
u32 guest_ldtr_ar_bytes;
u32 guest_tr_ar_bytes;
u32 guest_interruptibility_info;
u32 guest_activity_state;
u32 guest_sysenter_cs;
u32 host_ia32_sysenter_cs;
u32 vmx_preemption_timer_value;
u32 padding32[7]; /* room for future expansion */
u16 virtual_processor_id;
u16 posted_intr_nv;
u16 guest_es_selector;
u16 guest_cs_selector;
u16 guest_ss_selector;
u16 guest_ds_selector;
u16 guest_fs_selector;
u16 guest_gs_selector;
u16 guest_ldtr_selector;
u16 guest_tr_selector;
u16 guest_intr_status;
u16 host_es_selector;
u16 host_cs_selector;
u16 host_ss_selector;
u16 host_ds_selector;
u16 host_fs_selector;
u16 host_gs_selector;
u16 host_tr_selector;
};
/*
* VMCS12_REVISION is an arbitrary id that should be changed if the content or
* layout of struct vmcs12 is changed. MSR_IA32_VMX_BASIC returns this id, and
* VMPTRLD verifies that the VMCS region that L1 is loading contains this id.
*/
#define VMCS12_REVISION 0x11e57ed0
/*
* VMCS12_SIZE is the number of bytes L1 should allocate for the VMXON region
 * and any VMCS region. Although only sizeof(struct vmcs12) bytes are used by
 * the current implementation, 4K is reserved to avoid future complications.
*/
#define VMCS12_SIZE 0x1000
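/*
 * Hedged sketch (illustrative, not the in-tree VMPTRLD handler): the
 * revision check implied by the comment above, as emulation might
 * perform it after mapping L1's VMCS region.
 */
static inline bool __maybe_unused example_vmcs12_revision_ok(
		const struct vmcs12 *vmcs12)
{
	/* must match the id we advertise to L1 via MSR_IA32_VMX_BASIC */
	return vmcs12->revision_id == VMCS12_REVISION;
}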
/* Used to remember the last vmcs02 used for some recently used vmcs12s */
struct vmcs02_list {
struct list_head list;
gpa_t vmptr;
struct loaded_vmcs vmcs02;
};
/*
* The nested_vmx structure is part of vcpu_vmx, and holds information we need
* for correct emulation of VMX (i.e., nested VMX) on this vcpu.
*/
struct nested_vmx {
/* Has the level1 guest done vmxon? */
bool vmxon;
gpa_t vmxon_ptr;
/* The guest-physical address of the current VMCS L1 keeps for L2 */
gpa_t current_vmptr;
/* The host-usable pointer to the above */
struct page *current_vmcs12_page;
struct vmcs12 *current_vmcs12;
struct vmcs *current_shadow_vmcs;
/*
	 * Indicates whether the shadow vmcs must be updated with the
	 * data held by vmcs12
*/
bool sync_shadow_vmcs;
/* vmcs02_list cache of VMCSs recently used to run L2 guests */
struct list_head vmcs02_pool;
int vmcs02_num;
u64 vmcs01_tsc_offset;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
/*
* Guest pages referred to in vmcs02 with host-physical pointers, so
* we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
struct page *virtual_apic_page;
struct page *pi_desc_page;
struct pi_desc *pi_desc;
bool pi_pending;
u16 posted_intr_nv;
u64 msr_ia32_feature_control;
struct hrtimer preemption_timer;
bool preemption_timer_expired;
/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
u64 vmcs01_debugctl;
u16 vpid02;
u16 last_vpid;
u32 nested_vmx_procbased_ctls_low;
u32 nested_vmx_procbased_ctls_high;
u32 nested_vmx_true_procbased_ctls_low;
u32 nested_vmx_secondary_ctls_low;
u32 nested_vmx_secondary_ctls_high;
u32 nested_vmx_pinbased_ctls_low;
u32 nested_vmx_pinbased_ctls_high;
u32 nested_vmx_exit_ctls_low;
u32 nested_vmx_exit_ctls_high;
u32 nested_vmx_true_exit_ctls_low;
u32 nested_vmx_entry_ctls_low;
u32 nested_vmx_entry_ctls_high;
u32 nested_vmx_true_entry_ctls_low;
u32 nested_vmx_misc_low;
u32 nested_vmx_misc_high;
u32 nested_vmx_ept_caps;
u32 nested_vmx_vpid_caps;
};
#define POSTED_INTR_ON 0
#define POSTED_INTR_SN 1
/* Posted-Interrupt Descriptor */
struct pi_desc {
u32 pir[8]; /* Posted interrupt requested */
union {
struct {
/* bit 256 - Outstanding Notification */
u16 on : 1,
/* bit 257 - Suppress Notification */
sn : 1,
/* bit 271:258 - Reserved */
rsvd_1 : 14;
/* bit 279:272 - Notification Vector */
u8 nv;
/* bit 287:280 - Reserved */
u8 rsvd_2;
/* bit 319:288 - Notification Destination */
u32 ndst;
};
u64 control;
};
u32 rsvd[6];
} __aligned(64);
static bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
return test_and_set_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
return test_and_clear_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}
static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
return clear_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline void pi_set_sn(struct pi_desc *pi_desc)
{
return set_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_on(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_ON,
(unsigned long *)&pi_desc->control);
}
static inline int pi_test_sn(struct pi_desc *pi_desc)
{
return test_bit(POSTED_INTR_SN,
(unsigned long *)&pi_desc->control);
}
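/*
 * Hedged sketch (illustrative, not the in-tree delivery path): how the
 * accessors above are meant to compose when posting an interrupt.
 * Returning true means a notification event should be sent.
 */
static inline bool __maybe_unused example_post_interrupt(
		struct pi_desc *pi_desc, int vector)
{
	/* record the vector; if it was already pending, nothing to do */
	if (pi_test_and_set_pir(vector, pi_desc))
		return false;
	/* raise ON; notify only if it was previously clear */
	return !pi_test_and_set_on(pi_desc);
}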
struct vcpu_vmx {
struct kvm_vcpu vcpu;
unsigned long host_rsp;
u8 fail;
bool nmi_known_unmasked;
u32 exit_intr_info;
u32 idt_vectoring_info;
ulong rflags;
struct shared_msr_entry *guest_msrs;
int nmsrs;
int save_nmsrs;
unsigned long host_idt_base;
#ifdef CONFIG_X86_64
u64 msr_host_kernel_gs_base;
u64 msr_guest_kernel_gs_base;
#endif
u32 vm_entry_controls_shadow;
u32 vm_exit_controls_shadow;
/*
* loaded_vmcs points to the VMCS currently used in this vcpu. For a
* non-nested (L1) guest, it always points to vmcs01. For a nested
* guest (L2), it points to a different VMCS.
*/
struct loaded_vmcs vmcs01;
struct loaded_vmcs *loaded_vmcs;
bool __launched; /* temporary, used in vmx_vcpu_run */
struct msr_autoload {
unsigned nr;
struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
} msr_autoload;
struct {
int loaded;
u16 fs_sel, gs_sel, ldt_sel;
#ifdef CONFIG_X86_64
u16 ds_sel, es_sel;
#endif
int gs_ldt_reload_needed;
int fs_reload_needed;
u64 msr_host_bndcfgs;
unsigned long vmcs_host_cr4; /* May not match real cr4 */
} host_state;
struct {
int vm86_active;
ulong save_rflags;
struct kvm_segment segs[8];
} rmode;
struct {
u32 bitmask; /* 4 bits per segment (1 bit per field) */
struct kvm_save_segment {
u16 selector;
unsigned long base;
u32 limit;
u32 ar;
} seg[8];
} segment_cache;
int vpid;
bool emulation_required;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
ktime_t entry_time;
s64 vnmi_blocked_time;
u32 exit_reason;
/* Posted interrupt descriptor */
struct pi_desc pi_desc;
/* Support for a guest hypervisor (nested VMX) */
struct nested_vmx nested;
/* Dynamic PLE window. */
int ple_window;
bool ple_window_dirty;
/* Support for PML */
#define PML_ENTITY_NUM 512
struct page *pml_pg;
};
enum segment_cache_field {
SEG_FIELD_SEL = 0,
SEG_FIELD_BASE = 1,
SEG_FIELD_LIMIT = 2,
SEG_FIELD_AR = 3,
SEG_FIELD_NR = 4
};
static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
return &(to_vmx(vcpu)->pi_desc);
}
#define VMCS12_OFFSET(x) offsetof(struct vmcs12, x)
#define FIELD(number, name) [number] = VMCS12_OFFSET(name)
#define FIELD64(number, name) [number] = VMCS12_OFFSET(name), \
[number##_HIGH] = VMCS12_OFFSET(name)+4
static unsigned long shadow_read_only_fields[] = {
/*
* We do NOT shadow fields that are modified when L0
* traps and emulates any vmx instruction (e.g. VMPTRLD,
* VMXON...) executed by L1.
* For example, VM_INSTRUCTION_ERROR is read
* by L1 if a vmx instruction fails (part of the error path).
* Note the code assumes this logic. If for some reason
* we start shadowing these fields then we need to
* force a shadow sync when L0 emulates vmx instructions
* (e.g. force a sync if VM_INSTRUCTION_ERROR is modified
* by nested_vmx_failValid)
*/
VM_EXIT_REASON,
VM_EXIT_INTR_INFO,
VM_EXIT_INSTRUCTION_LEN,
IDT_VECTORING_INFO_FIELD,
IDT_VECTORING_ERROR_CODE,
VM_EXIT_INTR_ERROR_CODE,
EXIT_QUALIFICATION,
GUEST_LINEAR_ADDRESS,
GUEST_PHYSICAL_ADDRESS
};
static int max_shadow_read_only_fields =
ARRAY_SIZE(shadow_read_only_fields);
static unsigned long shadow_read_write_fields[] = {
TPR_THRESHOLD,
GUEST_RIP,
GUEST_RSP,
GUEST_CR0,
GUEST_CR3,
GUEST_CR4,
GUEST_INTERRUPTIBILITY_INFO,
GUEST_RFLAGS,
GUEST_CS_SELECTOR,
GUEST_CS_AR_BYTES,
GUEST_CS_LIMIT,
GUEST_CS_BASE,
GUEST_ES_BASE,
GUEST_BNDCFGS,
CR0_GUEST_HOST_MASK,
CR0_READ_SHADOW,
CR4_READ_SHADOW,
TSC_OFFSET,
EXCEPTION_BITMAP,
CPU_BASED_VM_EXEC_CONTROL,
VM_ENTRY_EXCEPTION_ERROR_CODE,
VM_ENTRY_INTR_INFO_FIELD,
VM_ENTRY_INSTRUCTION_LEN,
VM_ENTRY_EXCEPTION_ERROR_CODE,
HOST_FS_BASE,
HOST_GS_BASE,
HOST_FS_SELECTOR,
HOST_GS_SELECTOR
};
static int max_shadow_read_write_fields =
ARRAY_SIZE(shadow_read_write_fields);
static const unsigned short vmcs_field_to_offset_table[] = {
FIELD(VIRTUAL_PROCESSOR_ID, virtual_processor_id),
FIELD(POSTED_INTR_NV, posted_intr_nv),
FIELD(GUEST_ES_SELECTOR, guest_es_selector),
FIELD(GUEST_CS_SELECTOR, guest_cs_selector),
FIELD(GUEST_SS_SELECTOR, guest_ss_selector),
FIELD(GUEST_DS_SELECTOR, guest_ds_selector),
FIELD(GUEST_FS_SELECTOR, guest_fs_selector),
FIELD(GUEST_GS_SELECTOR, guest_gs_selector),
FIELD(GUEST_LDTR_SELECTOR, guest_ldtr_selector),
FIELD(GUEST_TR_SELECTOR, guest_tr_selector),
FIELD(GUEST_INTR_STATUS, guest_intr_status),
FIELD(HOST_ES_SELECTOR, host_es_selector),
FIELD(HOST_CS_SELECTOR, host_cs_selector),
FIELD(HOST_SS_SELECTOR, host_ss_selector),
FIELD(HOST_DS_SELECTOR, host_ds_selector),
FIELD(HOST_FS_SELECTOR, host_fs_selector),
FIELD(HOST_GS_SELECTOR, host_gs_selector),
FIELD(HOST_TR_SELECTOR, host_tr_selector),
FIELD64(IO_BITMAP_A, io_bitmap_a),
FIELD64(IO_BITMAP_B, io_bitmap_b),
FIELD64(MSR_BITMAP, msr_bitmap),
FIELD64(VM_EXIT_MSR_STORE_ADDR, vm_exit_msr_store_addr),
FIELD64(VM_EXIT_MSR_LOAD_ADDR, vm_exit_msr_load_addr),
FIELD64(VM_ENTRY_MSR_LOAD_ADDR, vm_entry_msr_load_addr),
FIELD64(TSC_OFFSET, tsc_offset),
FIELD64(VIRTUAL_APIC_PAGE_ADDR, virtual_apic_page_addr),
FIELD64(APIC_ACCESS_ADDR, apic_access_addr),
FIELD64(POSTED_INTR_DESC_ADDR, posted_intr_desc_addr),
FIELD64(EPT_POINTER, ept_pointer),
FIELD64(EOI_EXIT_BITMAP0, eoi_exit_bitmap0),
FIELD64(EOI_EXIT_BITMAP1, eoi_exit_bitmap1),
FIELD64(EOI_EXIT_BITMAP2, eoi_exit_bitmap2),
FIELD64(EOI_EXIT_BITMAP3, eoi_exit_bitmap3),
FIELD64(XSS_EXIT_BITMAP, xss_exit_bitmap),
FIELD64(GUEST_PHYSICAL_ADDRESS, guest_physical_address),
FIELD64(VMCS_LINK_POINTER, vmcs_link_pointer),
FIELD64(GUEST_IA32_DEBUGCTL, guest_ia32_debugctl),
FIELD64(GUEST_IA32_PAT, guest_ia32_pat),
FIELD64(GUEST_IA32_EFER, guest_ia32_efer),
FIELD64(GUEST_IA32_PERF_GLOBAL_CTRL, guest_ia32_perf_global_ctrl),
FIELD64(GUEST_PDPTR0, guest_pdptr0),
FIELD64(GUEST_PDPTR1, guest_pdptr1),
FIELD64(GUEST_PDPTR2, guest_pdptr2),
FIELD64(GUEST_PDPTR3, guest_pdptr3),
FIELD64(GUEST_BNDCFGS, guest_bndcfgs),
FIELD64(HOST_IA32_PAT, host_ia32_pat),
FIELD64(HOST_IA32_EFER, host_ia32_efer),
FIELD64(HOST_IA32_PERF_GLOBAL_CTRL, host_ia32_perf_global_ctrl),
FIELD(PIN_BASED_VM_EXEC_CONTROL, pin_based_vm_exec_control),
FIELD(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control),
FIELD(EXCEPTION_BITMAP, exception_bitmap),
FIELD(PAGE_FAULT_ERROR_CODE_MASK, page_fault_error_code_mask),
FIELD(PAGE_FAULT_ERROR_CODE_MATCH, page_fault_error_code_match),
FIELD(CR3_TARGET_COUNT, cr3_target_count),
FIELD(VM_EXIT_CONTROLS, vm_exit_controls),
FIELD(VM_EXIT_MSR_STORE_COUNT, vm_exit_msr_store_count),
FIELD(VM_EXIT_MSR_LOAD_COUNT, vm_exit_msr_load_count),
FIELD(VM_ENTRY_CONTROLS, vm_entry_controls),
FIELD(VM_ENTRY_MSR_LOAD_COUNT, vm_entry_msr_load_count),
FIELD(VM_ENTRY_INTR_INFO_FIELD, vm_entry_intr_info_field),
FIELD(VM_ENTRY_EXCEPTION_ERROR_CODE, vm_entry_exception_error_code),
FIELD(VM_ENTRY_INSTRUCTION_LEN, vm_entry_instruction_len),
FIELD(TPR_THRESHOLD, tpr_threshold),
FIELD(SECONDARY_VM_EXEC_CONTROL, secondary_vm_exec_control),
FIELD(VM_INSTRUCTION_ERROR, vm_instruction_error),
FIELD(VM_EXIT_REASON, vm_exit_reason),
FIELD(VM_EXIT_INTR_INFO, vm_exit_intr_info),
FIELD(VM_EXIT_INTR_ERROR_CODE, vm_exit_intr_error_code),
FIELD(IDT_VECTORING_INFO_FIELD, idt_vectoring_info_field),
FIELD(IDT_VECTORING_ERROR_CODE, idt_vectoring_error_code),
FIELD(VM_EXIT_INSTRUCTION_LEN, vm_exit_instruction_len),
FIELD(VMX_INSTRUCTION_INFO, vmx_instruction_info),
FIELD(GUEST_ES_LIMIT, guest_es_limit),
FIELD(GUEST_CS_LIMIT, guest_cs_limit),
FIELD(GUEST_SS_LIMIT, guest_ss_limit),
FIELD(GUEST_DS_LIMIT, guest_ds_limit),
FIELD(GUEST_FS_LIMIT, guest_fs_limit),
FIELD(GUEST_GS_LIMIT, guest_gs_limit),
FIELD(GUEST_LDTR_LIMIT, guest_ldtr_limit),
FIELD(GUEST_TR_LIMIT, guest_tr_limit),
FIELD(GUEST_GDTR_LIMIT, guest_gdtr_limit),
FIELD(GUEST_IDTR_LIMIT, guest_idtr_limit),
FIELD(GUEST_ES_AR_BYTES, guest_es_ar_bytes),
FIELD(GUEST_CS_AR_BYTES, guest_cs_ar_bytes),
FIELD(GUEST_SS_AR_BYTES, guest_ss_ar_bytes),
FIELD(GUEST_DS_AR_BYTES, guest_ds_ar_bytes),
FIELD(GUEST_FS_AR_BYTES, guest_fs_ar_bytes),
FIELD(GUEST_GS_AR_BYTES, guest_gs_ar_bytes),
FIELD(GUEST_LDTR_AR_BYTES, guest_ldtr_ar_bytes),
FIELD(GUEST_TR_AR_BYTES, guest_tr_ar_bytes),
FIELD(GUEST_INTERRUPTIBILITY_INFO, guest_interruptibility_info),
FIELD(GUEST_ACTIVITY_STATE, guest_activity_state),
FIELD(GUEST_SYSENTER_CS, guest_sysenter_cs),
FIELD(HOST_IA32_SYSENTER_CS, host_ia32_sysenter_cs),
FIELD(VMX_PREEMPTION_TIMER_VALUE, vmx_preemption_timer_value),
FIELD(CR0_GUEST_HOST_MASK, cr0_guest_host_mask),
FIELD(CR4_GUEST_HOST_MASK, cr4_guest_host_mask),
FIELD(CR0_READ_SHADOW, cr0_read_shadow),
FIELD(CR4_READ_SHADOW, cr4_read_shadow),
FIELD(CR3_TARGET_VALUE0, cr3_target_value0),
FIELD(CR3_TARGET_VALUE1, cr3_target_value1),
FIELD(CR3_TARGET_VALUE2, cr3_target_value2),
FIELD(CR3_TARGET_VALUE3, cr3_target_value3),
FIELD(EXIT_QUALIFICATION, exit_qualification),
FIELD(GUEST_LINEAR_ADDRESS, guest_linear_address),
FIELD(GUEST_CR0, guest_cr0),
FIELD(GUEST_CR3, guest_cr3),
FIELD(GUEST_CR4, guest_cr4),
FIELD(GUEST_ES_BASE, guest_es_base),
FIELD(GUEST_CS_BASE, guest_cs_base),
FIELD(GUEST_SS_BASE, guest_ss_base),
FIELD(GUEST_DS_BASE, guest_ds_base),
FIELD(GUEST_FS_BASE, guest_fs_base),
FIELD(GUEST_GS_BASE, guest_gs_base),
FIELD(GUEST_LDTR_BASE, guest_ldtr_base),
FIELD(GUEST_TR_BASE, guest_tr_base),
FIELD(GUEST_GDTR_BASE, guest_gdtr_base),
FIELD(GUEST_IDTR_BASE, guest_idtr_base),
FIELD(GUEST_DR7, guest_dr7),
FIELD(GUEST_RSP, guest_rsp),
FIELD(GUEST_RIP, guest_rip),
FIELD(GUEST_RFLAGS, guest_rflags),
FIELD(GUEST_PENDING_DBG_EXCEPTIONS, guest_pending_dbg_exceptions),
FIELD(GUEST_SYSENTER_ESP, guest_sysenter_esp),
FIELD(GUEST_SYSENTER_EIP, guest_sysenter_eip),
FIELD(HOST_CR0, host_cr0),
FIELD(HOST_CR3, host_cr3),
FIELD(HOST_CR4, host_cr4),
FIELD(HOST_FS_BASE, host_fs_base),
FIELD(HOST_GS_BASE, host_gs_base),
FIELD(HOST_TR_BASE, host_tr_base),
FIELD(HOST_GDTR_BASE, host_gdtr_base),
FIELD(HOST_IDTR_BASE, host_idtr_base),
FIELD(HOST_IA32_SYSENTER_ESP, host_ia32_sysenter_esp),
FIELD(HOST_IA32_SYSENTER_EIP, host_ia32_sysenter_eip),
FIELD(HOST_RSP, host_rsp),
FIELD(HOST_RIP, host_rip),
};
static inline short vmcs_field_to_offset(unsigned long field)
{
BUILD_BUG_ON(ARRAY_SIZE(vmcs_field_to_offset_table) > SHRT_MAX);
if (field >= ARRAY_SIZE(vmcs_field_to_offset_table) ||
vmcs_field_to_offset_table[field] == 0)
return -ENOENT;
return vmcs_field_to_offset_table[field];
}
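/*
 * Hedged sketch (illustrative): how a VMREAD emulation path might use
 * the offset table above to read a 64-bit field out of a cached
 * vmcs12.  Error handling is reduced to a sentinel for brevity.
 */
static inline u64 __maybe_unused example_read_vmcs12_u64(
		struct vmcs12 *vmcs12, unsigned long field)
{
	short offset = vmcs_field_to_offset(field);

	if (offset < 0)
		return 0;	/* field not backed by vmcs12 */
	return *(u64 *)((char *)vmcs12 + offset);
}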
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
return to_vmx(vcpu)->nested.current_vmcs12;
}
static struct page *nested_get_page(struct kvm_vcpu *vcpu, gpa_t addr)
{
struct page *page = kvm_vcpu_gfn_to_page(vcpu, addr >> PAGE_SHIFT);
if (is_error_page(page))
return NULL;
return page;
}
static void nested_release_page(struct page *page)
{
kvm_release_page_dirty(page);
}
static void nested_release_page_clean(struct page *page)
{
kvm_release_page_clean(page);
}
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
static void vmx_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
static bool guest_state_valid(struct kvm_vcpu *vcpu);
static u32 vmx_segment_access_rights(struct kvm_segment *var);
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
static int alloc_identity_pagetable(struct kvm *kvm);
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
/*
* We maintain a per-CPU linked-list of VMCS loaded on that CPU. This is needed
* when a CPU is brought down, and we need to VMCLEAR all VMCSs loaded on it.
*/
static DEFINE_PER_CPU(struct list_head, loaded_vmcss_on_cpu);
static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
/*
 * We maintain a per-CPU linked list of vCPUs, so in wakeup_handler() we
 * can find which vCPU should be woken up.
*/
static DEFINE_PER_CPU(struct list_head, blocked_vcpu_on_cpu);
static DEFINE_PER_CPU(spinlock_t, blocked_vcpu_on_cpu_lock);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
static unsigned long *vmx_msr_bitmap_nested;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
static bool cpu_has_load_ia32_efer;
static bool cpu_has_load_perf_global_ctrl;
static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
static DEFINE_SPINLOCK(vmx_vpid_lock);
static struct vmcs_config {
int size;
int order;
u32 revision_id;
u32 pin_based_exec_ctrl;
u32 cpu_based_exec_ctrl;
u32 cpu_based_2nd_exec_ctrl;
u32 vmexit_ctrl;
u32 vmentry_ctrl;
} vmcs_config;
static struct vmx_capability {
u32 ept;
u32 vpid;
} vmx_capability;
#define VMX_SEGMENT_FIELD(seg) \
[VCPU_SREG_##seg] = { \
.selector = GUEST_##seg##_SELECTOR, \
.base = GUEST_##seg##_BASE, \
.limit = GUEST_##seg##_LIMIT, \
.ar_bytes = GUEST_##seg##_AR_BYTES, \
}
static const struct kvm_vmx_segment_field {
unsigned selector;
unsigned base;
unsigned limit;
unsigned ar_bytes;
} kvm_vmx_segment_fields[] = {
VMX_SEGMENT_FIELD(CS),
VMX_SEGMENT_FIELD(DS),
VMX_SEGMENT_FIELD(ES),
VMX_SEGMENT_FIELD(FS),
VMX_SEGMENT_FIELD(GS),
VMX_SEGMENT_FIELD(SS),
VMX_SEGMENT_FIELD(TR),
VMX_SEGMENT_FIELD(LDTR),
};
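/*
 * Illustrative expansion (added for clarity): VMX_SEGMENT_FIELD(CS)
 * fills kvm_vmx_segment_fields[VCPU_SREG_CS] with GUEST_CS_SELECTOR,
 * GUEST_CS_BASE, GUEST_CS_LIMIT and GUEST_CS_AR_BYTES, letting segment
 * load/save code index the VMCS field encodings by segment register.
 */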
static u64 host_efer;
static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
/*
* Keep MSR_STAR at the end, as setup_msrs() will try to optimize it
* away by decrementing the array size.
*/
static const u32 vmx_msr_index[] = {
#ifdef CONFIG_X86_64
MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
#endif
MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
static inline bool is_page_fault(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
}
static inline bool is_no_device(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
}
static inline bool is_invalid_opcode(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
}
static inline bool is_external_interrupt(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
}
static inline bool is_machine_check(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
(INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
}
static inline bool cpu_has_vmx_msr_bitmap(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
}
static inline bool cpu_has_vmx_tpr_shadow(void)
{
return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
}
static inline bool cpu_need_tpr_shadow(struct kvm_vcpu *vcpu)
{
return cpu_has_vmx_tpr_shadow() && lapic_in_kernel(vcpu);
}
static inline bool cpu_has_secondary_exec_ctrls(void)
{
return vmcs_config.cpu_based_exec_ctrl &
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
}
static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}
static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
}
static inline bool cpu_has_vmx_apic_register_virt(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_APIC_REGISTER_VIRT;
}
static inline bool cpu_has_vmx_virtual_intr_delivery(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
}
static inline bool cpu_has_vmx_posted_intr(void)
{
return IS_ENABLED(CONFIG_X86_LOCAL_APIC) &&
vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
}
static inline bool cpu_has_vmx_apicv(void)
{
return cpu_has_vmx_apic_register_virt() &&
cpu_has_vmx_virtual_intr_delivery() &&
cpu_has_vmx_posted_intr();
}
static inline bool cpu_has_vmx_flexpriority(void)
{
return cpu_has_vmx_tpr_shadow() &&
cpu_has_vmx_virtualize_apic_accesses();
}
static inline bool cpu_has_vmx_ept_execute_only(void)
{
return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
}
static inline bool cpu_has_vmx_ept_2m_page(void)
{
return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
}
static inline bool cpu_has_vmx_ept_1g_page(void)
{
return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
}
static inline bool cpu_has_vmx_ept_4levels(void)
{
return vmx_capability.ept & VMX_EPT_PAGE_WALK_4_BIT;
}
static inline bool cpu_has_vmx_ept_ad_bits(void)
{
return vmx_capability.ept & VMX_EPT_AD_BIT;
}
static inline bool cpu_has_vmx_invept_context(void)
{
return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
}
static inline bool cpu_has_vmx_invept_global(void)
{
return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
}
static inline bool cpu_has_vmx_invvpid_single(void)
{
return vmx_capability.vpid & VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT;
}
static inline bool cpu_has_vmx_invvpid_global(void)
{
return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
}
static inline bool cpu_has_vmx_ept(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_EPT;
}
static inline bool cpu_has_vmx_unrestricted_guest(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_UNRESTRICTED_GUEST;
}
static inline bool cpu_has_vmx_ple(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_PAUSE_LOOP_EXITING;
}
static inline bool cpu_need_virtualize_apic_accesses(struct kvm_vcpu *vcpu)
{
return flexpriority_enabled && lapic_in_kernel(vcpu);
}
static inline bool cpu_has_vmx_vpid(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_VPID;
}
static inline bool cpu_has_vmx_rdtscp(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_RDTSCP;
}
static inline bool cpu_has_vmx_invpcid(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_ENABLE_INVPCID;
}
static inline bool cpu_has_virtual_nmis(void)
{
return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
}
static inline bool cpu_has_vmx_wbinvd_exit(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_WBINVD_EXITING;
}
static inline bool cpu_has_vmx_shadow_vmcs(void)
{
u64 vmx_msr;
rdmsrl(MSR_IA32_VMX_MISC, vmx_msr);
/* check if the cpu supports writing r/o exit information fields */
if (!(vmx_msr & MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS))
return false;
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_SHADOW_VMCS;
}
static inline bool cpu_has_vmx_pml(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl & SECONDARY_EXEC_ENABLE_PML;
}
static inline bool cpu_has_vmx_tsc_scaling(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_TSC_SCALING;
}
static inline bool report_flexpriority(void)
{
return flexpriority_enabled;
}
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
}
static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
{
return (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
(vmcs12->secondary_vm_exec_control & bit);
}
static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control &
PIN_BASED_VMX_PREEMPTION_TIMER;
}
static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
}
static inline bool nested_cpu_has_xsaves(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES) &&
vmx_xsaves_supported();
}
static inline bool nested_cpu_has_virt_x2apic_mode(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
}
static inline bool nested_cpu_has_vpid(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_VPID);
}
static inline bool nested_cpu_has_apic_reg_virt(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_APIC_REGISTER_VIRT);
}
static inline bool nested_cpu_has_vid(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
}
static inline bool nested_cpu_has_posted_intr(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control & PIN_BASED_POSTED_INTR;
}
static inline bool is_exception(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
== (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
}
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification);
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 reason, unsigned long qualification);
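/*
 * Return the index in vmx->guest_msrs[] of the slot tracking 'msr', or -1 if
 * 'msr' is not among the shared MSRs listed in vmx_msr_index[].
 */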
static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
{
int i;
for (i = 0; i < vmx->nmsrs; ++i)
if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
return i;
return -1;
}
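/*
 * Execute INVVPID with extent type 'ext'; the vpid/gva pair is passed via the
 * 128-bit in-memory operand descriptor required by the instruction. A failure
 * (CF or ZF set) traps with ud2.
 */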
static inline void __invvpid(int ext, u16 vpid, gva_t gva)
{
struct {
u64 vpid : 16;
u64 rsvd : 48;
u64 gva;
} operand = { vpid, 0, gva };
asm volatile (__ex(ASM_VMX_INVVPID)
/* CF==1 or ZF==1 --> VMfail; trap it with ud2 */
"; ja 1f ; ud2 ; 1:"
: : "a"(&operand), "c"(ext) : "cc", "memory");
}
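/*
 * Execute INVEPT with extent type 'ext'; eptp/gpa form the in-memory operand
 * descriptor. A failure (CF or ZF set) traps with ud2.
 */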
static inline void __invept(int ext, u64 eptp, gpa_t gpa)
{
struct {
u64 eptp, gpa;
} operand = {eptp, gpa};
asm volatile (__ex(ASM_VMX_INVEPT)
/* CF==1 or ZF==1 --> VMfail; trap it with ud2 */
"; ja 1f ; ud2 ; 1:\n"
: : "a" (&operand), "c" (ext) : "cc", "memory");
}
static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
{
int i;
i = __find_msr_index(vmx, msr);
if (i >= 0)
return &vmx->guest_msrs[i];
return NULL;
}
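/*
 * VMCLEAR the given VMCS: flush its cached state to memory and mark it
 * inactive and not-current on this cpu.
 */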
static void vmcs_clear(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
u8 error;
asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc", "memory");
if (error)
printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
vmcs, phys_addr);
}
static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
vmcs_clear(loaded_vmcs->vmcs);
loaded_vmcs->cpu = -1;
loaded_vmcs->launched = 0;
}
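/* VMPTRLD the given VMCS, making it the current and active VMCS on this cpu. */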
static void vmcs_load(struct vmcs *vmcs)
{
u64 phys_addr = __pa(vmcs);
u8 error;
asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
: "=qm"(error) : "a"(&phys_addr), "m"(phys_addr)
: "cc", "memory");
if (error)
printk(KERN_ERR "kvm: vmptrld %p/%llx failed\n",
vmcs, phys_addr);
}
#ifdef CONFIG_KEXEC_CORE
/*
* This bitmap indicates, per cpu, whether the vmclear operation
* for kdump is enabled. All cpus are disabled by default.
*/
static cpumask_t crash_vmclear_enabled_bitmap = CPU_MASK_NONE;
static inline void crash_enable_local_vmclear(int cpu)
{
cpumask_set_cpu(cpu, &crash_vmclear_enabled_bitmap);
}
static inline void crash_disable_local_vmclear(int cpu)
{
cpumask_clear_cpu(cpu, &crash_vmclear_enabled_bitmap);
}
static inline int crash_local_vmclear_enabled(int cpu)
{
return cpumask_test_cpu(cpu, &crash_vmclear_enabled_bitmap);
}
static void crash_vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v;
if (!crash_local_vmclear_enabled(cpu))
return;
list_for_each_entry(v, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
vmcs_clear(v->vmcs);
}
#else
static inline void crash_enable_local_vmclear(int cpu) { }
static inline void crash_disable_local_vmclear(int cpu) { }
#endif /* CONFIG_KEXEC_CORE */
static void __loaded_vmcs_clear(void *arg)
{
struct loaded_vmcs *loaded_vmcs = arg;
int cpu = raw_smp_processor_id();
if (loaded_vmcs->cpu != cpu)
return; /* vcpu migration can race with cpu offline */
if (per_cpu(current_vmcs, cpu) == loaded_vmcs->vmcs)
per_cpu(current_vmcs, cpu) = NULL;
crash_disable_local_vmclear(cpu);
list_del(&loaded_vmcs->loaded_vmcss_on_cpu_link);
/*
* Ensure that the deletion from loaded_vmcss_on_cpu_link above is
* visible before loaded_vmcs->cpu is set to -1 in loaded_vmcs_init().
* Otherwise another cpu could observe cpu == -1 first and then re-add
* the vmcs to its per-cpu list before it has been deleted here.
*/
smp_wmb();
loaded_vmcs_init(loaded_vmcs);
crash_enable_local_vmclear(cpu);
}
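/*
 * VMCLEAR a loaded VMCS on the cpu where it was last loaded, sending an IPI
 * to that cpu if necessary.
 */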
static void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs)
{
int cpu = loaded_vmcs->cpu;
if (cpu != -1)
smp_call_function_single(cpu,
__loaded_vmcs_clear, loaded_vmcs, 1);
}
static inline void vpid_sync_vcpu_single(int vpid)
{
if (vpid == 0)
return;
if (cpu_has_vmx_invvpid_single())
__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}
static inline void vpid_sync_vcpu_global(void)
{
if (cpu_has_vmx_invvpid_global())
__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}
static inline void vpid_sync_context(int vpid)
{
if (cpu_has_vmx_invvpid_single())
vpid_sync_vcpu_single(vpid);
else
vpid_sync_vcpu_global();
}
static inline void ept_sync_global(void)
{
if (cpu_has_vmx_invept_global())
__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}
static inline void ept_sync_context(u64 eptp)
{
if (enable_ept) {
if (cpu_has_vmx_invept_context())
__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
else
ept_sync_global();
}
}
static __always_inline unsigned long vmcs_readl(unsigned long field)
{
unsigned long value;
asm volatile (__ex_clear(ASM_VMX_VMREAD_RDX_RAX, "%0")
: "=a"(value) : "d"(field) : "cc");
return value;
}
static __always_inline u16 vmcs_read16(unsigned long field)
{
return vmcs_readl(field);
}
static __always_inline u32 vmcs_read32(unsigned long field)
{
return vmcs_readl(field);
}
static __always_inline u64 vmcs_read64(unsigned long field)
{
#ifdef CONFIG_X86_64
return vmcs_readl(field);
#else
return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
#endif
}
static noinline void vmwrite_error(unsigned long field, unsigned long value)
{
printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
dump_stack();
}
static void vmcs_writel(unsigned long field, unsigned long value)
{
u8 error;
asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
: "=q"(error) : "a"(value), "d"(field) : "cc");
if (unlikely(error))
vmwrite_error(field, value);
}
static void vmcs_write16(unsigned long field, u16 value)
{
vmcs_writel(field, value);
}
static void vmcs_write32(unsigned long field, u32 value)
{
vmcs_writel(field, value);
}
static void vmcs_write64(unsigned long field, u64 value)
{
vmcs_writel(field, value);
#ifndef CONFIG_X86_64
asm volatile ("");
vmcs_writel(field+1, value >> 32);
#endif
}
static void vmcs_clear_bits(unsigned long field, u32 mask)
{
vmcs_writel(field, vmcs_readl(field) & ~mask);
}
static void vmcs_set_bits(unsigned long field, u32 mask)
{
vmcs_writel(field, vmcs_readl(field) | mask);
}
static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val)
{
vmcs_write32(VM_ENTRY_CONTROLS, val);
vmx->vm_entry_controls_shadow = val;
}
static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val)
{
if (vmx->vm_entry_controls_shadow != val)
vm_entry_controls_init(vmx, val);
}
static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx)
{
return vmx->vm_entry_controls_shadow;
}
static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val);
}
static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val);
}
static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val)
{
vmcs_write32(VM_EXIT_CONTROLS, val);
vmx->vm_exit_controls_shadow = val;
}
static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val)
{
if (vmx->vm_exit_controls_shadow != val)
vm_exit_controls_init(vmx, val);
}
static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx)
{
return vmx->vm_exit_controls_shadow;
}
static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val)
{
vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val);
}
static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val)
{
vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val);
}
static void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
{
vmx->segment_cache.bitmask = 0;
}
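/*
 * Test whether the given segment field is already cached, and mark it cached
 * either way so that the caller can store a freshly read value.
 */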
static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg,
unsigned field)
{
bool ret;
u32 mask = 1 << (seg * SEG_FIELD_NR + field);
if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) {
vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS);
vmx->segment_cache.bitmask = 0;
}
ret = vmx->segment_cache.bitmask & mask;
vmx->segment_cache.bitmask |= mask;
return ret;
}
static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg)
{
u16 *p = &vmx->segment_cache.seg[seg].selector;
if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL))
*p = vmcs_read16(kvm_vmx_segment_fields[seg].selector);
return *p;
}
static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg)
{
ulong *p = &vmx->segment_cache.seg[seg].base;
if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE))
*p = vmcs_readl(kvm_vmx_segment_fields[seg].base);
return *p;
}
static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg)
{
u32 *p = &vmx->segment_cache.seg[seg].limit;
if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT))
*p = vmcs_read32(kvm_vmx_segment_fields[seg].limit);
return *p;
}
static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg)
{
u32 *p = &vmx->segment_cache.seg[seg].ar;
if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR))
*p = vmcs_read32(kvm_vmx_segment_fields[seg].ar_bytes);
return *p;
}
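/*
 * Recompute which guest exception vectors must cause a VM exit and write the
 * result to the EXCEPTION_BITMAP field of the VMCS.
 */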
static void update_exception_bitmap(struct kvm_vcpu *vcpu)
{
u32 eb;
eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
(1u << NM_VECTOR) | (1u << DB_VECTOR);
if ((vcpu->guest_debug &
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
eb |= 1u << BP_VECTOR;
if (to_vmx(vcpu)->rmode.vm86_active)
eb = ~0;
if (enable_ept)
eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
if (vcpu->fpu_active)
eb &= ~(1u << NM_VECTOR);
/* When we are running a nested L2 guest and L1 specified for it a
* certain exception bitmap, we must trap the same exceptions and pass
* them to L1. When running L2, we will only handle the exceptions
* specified above if L1 did not want them.
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
vmcs_write32(EXCEPTION_BITMAP, eb);
}
static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit)
{
vm_entry_controls_clearbit(vmx, entry);
vm_exit_controls_clearbit(vmx, exit);
}
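/*
 * Stop switching 'msr' atomically around VM entry/exit: use the dedicated
 * VM-entry/VM-exit controls where available, otherwise remove the MSR from
 * the autoload lists.
 */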
static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
{
unsigned i;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER);
return;
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
clear_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
return;
}
break;
}
for (i = 0; i < m->nr; ++i)
if (m->guest[i].index == msr)
break;
if (i == m->nr)
return;
--m->nr;
m->guest[i] = m->guest[m->nr];
m->host[i] = m->host[m->nr];
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
unsigned long entry, unsigned long exit,
unsigned long guest_val_vmcs, unsigned long host_val_vmcs,
u64 guest_val, u64 host_val)
{
vmcs_write64(guest_val_vmcs, guest_val);
vmcs_write64(host_val_vmcs, host_val);
vm_entry_controls_setbit(vmx, entry);
vm_exit_controls_setbit(vmx, exit);
}
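/*
 * Arrange for 'msr' to be loaded with guest_val on VM entry and host_val on
 * VM exit: use the dedicated VM-entry/VM-exit controls where available,
 * otherwise append the MSR to the autoload lists.
 */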
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
u64 guest_val, u64 host_val)
{
unsigned i;
struct msr_autoload *m = &vmx->msr_autoload;
switch (msr) {
case MSR_EFER:
if (cpu_has_load_ia32_efer) {
add_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_EFER,
VM_EXIT_LOAD_IA32_EFER,
GUEST_IA32_EFER,
HOST_IA32_EFER,
guest_val, host_val);
return;
}
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
if (cpu_has_load_perf_global_ctrl) {
add_atomic_switch_msr_special(vmx,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL,
GUEST_IA32_PERF_GLOBAL_CTRL,
HOST_IA32_PERF_GLOBAL_CTRL,
guest_val, host_val);
return;
}
break;
}
for (i = 0; i < m->nr; ++i)
if (m->guest[i].index == msr)
break;
if (i == NR_AUTOLOAD_MSRS) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
} else if (i == m->nr) {
++m->nr;
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
}
m->guest[i].index = msr;
m->guest[i].value = guest_val;
m->host[i].index = msr;
m->host[i].value = host_val;
}
static void reload_tss(void)
{
/*
* VT-x restores TR but forces its limit to a fixed value, which is
* useless to us. Mark the in-memory TSS descriptor available again
* so that load_TR_desc() can reload TR with the correct limit.
*/
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *descs;
descs = (void *)gdt->address;
descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
load_TR_desc();
}
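/*
 * Decide how guest EFER is switched. Returns true if EFER should stay in the
 * shared-MSR save list, false if it has been set up to be switched atomically
 * on VM entry/exit instead.
 */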
static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
{
u64 guest_efer;
u64 ignore_bits;
guest_efer = vmx->vcpu.arch.efer;
/*
* NX is emulated; LMA and LME handled by hardware; SCE meaningless
* outside long mode
*/
ignore_bits = EFER_NX | EFER_SCE;
#ifdef CONFIG_X86_64
ignore_bits |= EFER_LMA | EFER_LME;
/* SCE is meaningful only in long mode on Intel */
if (guest_efer & EFER_LMA)
ignore_bits &= ~(u64)EFER_SCE;
#endif
guest_efer &= ~ignore_bits;
guest_efer |= host_efer & ignore_bits;
vmx->guest_msrs[efer_offset].data = guest_efer;
vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
clear_atomic_switch_msr(vmx, MSR_EFER);
/*
* On EPT, we can't emulate NX, so we must switch EFER atomically.
* On CPUs that support "load IA32_EFER", always switch EFER
* atomically, since it's faster than switching it manually.
*/
if (cpu_has_load_ia32_efer ||
(enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) {
guest_efer = vmx->vcpu.arch.efer;
if (!(guest_efer & EFER_LMA))
guest_efer &= ~EFER_LME;
if (guest_efer != host_efer)
add_atomic_switch_msr(vmx, MSR_EFER,
guest_efer, host_efer);
return false;
}
return true;
}
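/*
 * Return the base address of the segment named by 'selector', looking it up
 * in the GDT or, if the TI bit is set, in the LDT.
 */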
static unsigned long segment_base(u16 selector)
{
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *d;
unsigned long table_base;
unsigned long v;
if (!(selector & ~3))
return 0;
table_base = gdt->address;
if (selector & 4) { /* from ldt */
u16 ldt_selector = kvm_read_ldt();
if (!(ldt_selector & ~3))
return 0;
table_base = segment_base(ldt_selector);
}
d = (struct desc_struct *)(table_base + (selector & ~7));
v = get_desc_base(d);
#ifdef CONFIG_X86_64
if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
#endif
return v;
}
static inline unsigned long kvm_read_tr_base(void)
{
u16 tr;
asm("str %0" : "=g"(tr));
return segment_base(tr);
}
static void vmx_save_host_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int i;
if (vmx->host_state.loaded)
return;
vmx->host_state.loaded = 1;
/*
* Set host fs and gs selectors. Unfortunately, 22.2.3 does not
* allow segment selectors with cpl > 0 or ti == 1.
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
savesegment(fs, vmx->host_state.fs_sel);
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
} else {
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
savesegment(gs, vmx->host_state.gs_sel);
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
vmcs_write16(HOST_GS_SELECTOR, 0);
vmx->host_state.gs_ldt_reload_needed = 1;
}
#ifdef CONFIG_X86_64
savesegment(ds, vmx->host_state.ds_sel);
savesegment(es, vmx->host_state.es_sel);
#endif
#ifdef CONFIG_X86_64
vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
#else
vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
#endif
#ifdef CONFIG_X86_64
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (boot_cpu_has(X86_FEATURE_MPX))
rdmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
for (i = 0; i < vmx->save_nmsrs; ++i)
kvm_set_shared_msr(vmx->guest_msrs[i].index,
vmx->guest_msrs[i].data,
vmx->guest_msrs[i].mask);
}
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu))
rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
load_gs_index(vmx->host_state.gs_sel);
#else
loadsegment(gs, vmx->host_state.gs_sel);
#endif
}
if (vmx->host_state.fs_reload_needed)
loadsegment(fs, vmx->host_state.fs_sel);
#ifdef CONFIG_X86_64
if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
loadsegment(ds, vmx->host_state.ds_sel);
loadsegment(es, vmx->host_state.es_sel);
}
#endif
reload_tss();
#ifdef CONFIG_X86_64
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif
if (vmx->host_state.msr_host_bndcfgs)
wrmsrl(MSR_IA32_BNDCFGS, vmx->host_state.msr_host_bndcfgs);
/*
* If the FPU is not active (through the host task or
* the guest vcpu), then restore the cr0.TS bit.
*/
if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
stts();
load_gdt(this_cpu_ptr(&host_gdt));
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
{
preempt_disable();
__vmx_load_host_state(vmx);
preempt_enable();
}
static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct pi_desc old, new;
unsigned int dest;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
return;
do {
old.control = new.control = pi_desc->control;
/*
* If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, there
* are two possible cases:
* 1. After running 'pre_block', a context switch
*    happened. In this case, 'sn' was set in
*    vmx_vcpu_put(), so we need to clear it here.
* 2. After running 'pre_block', we were blocked and
*    later woken up by another task. In this case,
*    'pi_post_block' does all the necessary work.
* We cannot distinguish the two cases here, so we clear
* 'sn' unconditionally; that is harmless in case #2.
*/
if (pi_desc->nv != POSTED_INTR_WAKEUP_VECTOR) {
if (vcpu->cpu != cpu) {
dest = cpu_physical_id(cpu);
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
}
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
}
/* Allow posting non-urgent interrupts */
new.sn = 0;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
}
/*
* Switches to the specified vcpu, until a matching vcpu_put(). Assumes
* the vcpu mutex is already held.
*/
static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
if (!vmm_exclusive)
kvm_cpu_vmxon(phys_addr);
else if (vmx->loaded_vmcs->cpu != cpu)
loaded_vmcs_clear(vmx->loaded_vmcs);
if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
}
if (vmx->loaded_vmcs->cpu != cpu) {
struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
unsigned long sysenter_esp;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
local_irq_disable();
crash_disable_local_vmclear(cpu);
/*
* Read loaded_vmcs->cpu should be before fetching
* loaded_vmcs->loaded_vmcss_on_cpu_link.
* See the comments in __loaded_vmcs_clear().
*/
smp_rmb();
list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link,
&per_cpu(loaded_vmcss_on_cpu, cpu));
crash_enable_local_vmclear(cpu);
local_irq_enable();
/*
* Linux uses per-cpu TSS and GDT, so set these when switching
* processors.
*/
vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
/* Setup TSC multiplier */
if (cpu_has_vmx_tsc_scaling())
vmcs_write64(TSC_MULTIPLIER,
vcpu->arch.tsc_scaling_ratio);
vmx->loaded_vmcs->cpu = cpu;
}
vmx_vcpu_pi_load(vcpu, cpu);
}
static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
return;
/* Set SN when the vCPU is preempted */
if (vcpu->preempted)
pi_set_sn(pi_desc);
}
static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
{
vmx_vcpu_pi_put(vcpu);
__vmx_load_host_state(to_vmx(vcpu));
if (!vmm_exclusive) {
__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
vcpu->cpu = -1;
kvm_cpu_vmxoff();
}
}
static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
{
ulong cr0;
if (vcpu->fpu_active)
return;
vcpu->fpu_active = 1;
cr0 = vmcs_readl(GUEST_CR0);
cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
vmcs_writel(GUEST_CR0, cr0);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
if (is_guest_mode(vcpu))
vcpu->arch.cr0_guest_owned_bits &=
~get_vmcs12(vcpu)->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
/*
* Return the cr0 value that a nested guest would read. This is a combination
* of the real cr0 used to run the guest (guest_cr0), and the bits shadowed by
* its hypervisor (cr0_read_shadow).
*/
static inline unsigned long nested_read_cr0(struct vmcs12 *fields)
{
return (fields->guest_cr0 & ~fields->cr0_guest_host_mask) |
(fields->cr0_read_shadow & fields->cr0_guest_host_mask);
}
static inline unsigned long nested_read_cr4(struct vmcs12 *fields)
{
return (fields->guest_cr4 & ~fields->cr4_guest_host_mask) |
(fields->cr4_read_shadow & fields->cr4_guest_host_mask);
}
static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
{
/* Note that there is no vcpu->fpu_active = 0 here. The caller must
* set this *before* calling this function.
*/
vmx_decache_cr0_guest_bits(vcpu);
vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = 0;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
if (is_guest_mode(vcpu)) {
/*
* L1's specified read shadow might not contain the TS bit,
* so now that we turned on shadowing of this bit, we need to
* set this bit of the shadow. Like in nested_vmx_run we need
* nested_read_cr0(vmcs12), but vmcs12->guest_cr0 is not yet
* up-to-date here because we just decached cr0.TS (and we'll
* only update vmcs12->guest_cr0 on nested exit).
*/
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
vmcs12->guest_cr0 = (vmcs12->guest_cr0 & ~X86_CR0_TS) |
(vcpu->arch.cr0 & X86_CR0_TS);
vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
} else
vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
}
static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
{
unsigned long rflags, save_rflags;
if (!test_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail)) {
__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
rflags = vmcs_readl(GUEST_RFLAGS);
if (to_vmx(vcpu)->rmode.vm86_active) {
rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
save_rflags = to_vmx(vcpu)->rmode.save_rflags;
rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
}
to_vmx(vcpu)->rflags = rflags;
}
return to_vmx(vcpu)->rflags;
}
static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
to_vmx(vcpu)->rflags = rflags;
if (to_vmx(vcpu)->rmode.vm86_active) {
to_vmx(vcpu)->rmode.save_rflags = rflags;
rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}
vmcs_writel(GUEST_RFLAGS, rflags);
}
static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
int ret = 0;
if (interruptibility & GUEST_INTR_STATE_STI)
ret |= KVM_X86_SHADOW_INT_STI;
if (interruptibility & GUEST_INTR_STATE_MOV_SS)
ret |= KVM_X86_SHADOW_INT_MOV_SS;
return ret;
}
static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
u32 interruptibility = interruptibility_old;
interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
if (mask & KVM_X86_SHADOW_INT_MOV_SS)
interruptibility |= GUEST_INTR_STATE_MOV_SS;
else if (mask & KVM_X86_SHADOW_INT_STI)
interruptibility |= GUEST_INTR_STATE_STI;
if ((interruptibility != interruptibility_old))
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
unsigned long rip;
rip = kvm_rip_read(vcpu);
rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_rip_write(vcpu, rip);
/* skipping an emulated instruction also counts */
vmx_set_interrupt_shadow(vcpu, 0);
}
/*
* KVM wants to re-inject into the guest page faults that it intercepted.
* For a nested guest, this function checks whether the fault must be
* injected into L1 or L2.
*/
static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 intr_info = nr | INTR_INFO_VALID_MASK;
if (!reinject && is_guest_mode(vcpu) &&
nested_vmx_check_exception(vcpu, nr))
return;
if (has_error_code) {
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
intr_info |= INTR_INFO_DELIVER_CODE_MASK;
}
if (vmx->rmode.vm86_active) {
int inc_eip = 0;
if (kvm_exception_is_soft(nr))
inc_eip = vcpu->arch.event_exit_inst_len;
if (kvm_inject_realmode_interrupt(vcpu, nr, inc_eip) != EMULATE_DONE)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
if (kvm_exception_is_soft(nr)) {
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmx->vcpu.arch.event_exit_inst_len);
intr_info |= INTR_TYPE_SOFT_EXCEPTION;
} else
intr_info |= INTR_TYPE_HARD_EXCEPTION;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
}
static bool vmx_rdtscp_supported(void)
{
return cpu_has_vmx_rdtscp();
}
static bool vmx_invpcid_supported(void)
{
return cpu_has_vmx_invpcid() && enable_ept;
}
/*
* Swap two entries in the guest MSR save/restore array.
*/
static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
{
struct shared_msr_entry tmp;
tmp = vmx->guest_msrs[to];
vmx->guest_msrs[to] = vmx->guest_msrs[from];
vmx->guest_msrs[from] = tmp;
}
static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
{
unsigned long *msr_bitmap;
if (is_guest_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_nested;
else if (vcpu->arch.apic_base & X2APIC_ENABLE) {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
} else {
if (is_long_mode(vcpu))
msr_bitmap = vmx_msr_bitmap_longmode;
else
msr_bitmap = vmx_msr_bitmap_legacy;
}
vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
}
/*
* Set up the vmcs to automatically save and restore system
* msrs. Don't touch the 64-bit msrs if the guest is in legacy
* mode, as fiddling with msrs is very expensive.
*/
static void setup_msrs(struct vcpu_vmx *vmx)
{
int save_nmsrs, index;
save_nmsrs = 0;
#ifdef CONFIG_X86_64
if (is_long_mode(&vmx->vcpu)) {
index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_LSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_CSTAR);
if (index >= 0)
move_msr_up(vmx, index, save_nmsrs++);
index = __find_msr_index(vmx, MSR_TSC_AUX);
if (index >= 0 && guest_cpuid_has_rdtscp(&vmx->vcpu))
move_msr_up(vmx, index, save_nmsrs++);
/*
* MSR_STAR is only needed on long mode guests, and only
* if efer.sce is enabled.
*/
index = __find_msr_index(vmx, MSR_STAR);
if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
move_msr_up(vmx, index, save_nmsrs++);
}
#endif
index = __find_msr_index(vmx, MSR_EFER);
if (index >= 0 && update_transition_efer(vmx, index))
move_msr_up(vmx, index, save_nmsrs++);
vmx->save_nmsrs = save_nmsrs;
if (cpu_has_vmx_msr_bitmap())
vmx_set_msr_bitmap(&vmx->vcpu);
}
/*
* Reads and returns the guest's time stamp counter "register":
*   guest_tsc = (host_tsc * tsc_multiplier) >> 48 + tsc_offset
* -- Intel TSC Scaling for Virtualization White Paper, sec 1.3
*/
static u64 guest_read_tsc(struct kvm_vcpu *vcpu)
{
u64 host_tsc, tsc_offset;
host_tsc = rdtsc();
tsc_offset = vmcs_read64(TSC_OFFSET);
return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
}
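/*
 * Illustrative example of the scaling formula above (numbers chosen here,
 * not taken from hardware): with the 48-bit fractional format, a multiplier
 * of 1.0 is encoded as 1ULL << 48, and scaling host_tsc = 1000 by a 1.5
 * ratio (3ULL << 47) yields (1000 * (3ULL << 47)) >> 48 = 1500 before
 * tsc_offset is added.
 */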
/*
* Like guest_read_tsc, but always returns L1's notion of the timestamp
* counter, even if a nested guest (L2) is currently running.
*/
static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
u64 tsc_offset;
tsc_offset = is_guest_mode(vcpu) ?
to_vmx(vcpu)->nested.vmcs01_tsc_offset :
vmcs_read64(TSC_OFFSET);
return host_tsc + tsc_offset;
}
static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
{
return vmcs_read64(TSC_OFFSET);
}
/*
* Writes 'offset' into the guest's TSC-offset VMCS field.
*/
static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
if (is_guest_mode(vcpu)) {
/*
* We're here if L1 chose not to trap WRMSR to TSC. According
* to the spec, this should set L1's TSC; The offset that L1
* set for L2 remains unchanged, and still needs to be added
* to the newly set TSC to get L2's TSC.
*/
struct vmcs12 *vmcs12;
to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
/* recalculate vmcs02.TSC_OFFSET: */
vmcs12 = get_vmcs12(vcpu);
vmcs_write64(TSC_OFFSET, offset +
(nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
vmcs12->tsc_offset : 0));
} else {
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
vmcs_read64(TSC_OFFSET), offset);
vmcs_write64(TSC_OFFSET, offset);
}
}
static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
u64 offset = vmcs_read64(TSC_OFFSET);
vmcs_write64(TSC_OFFSET, offset + adjustment);
if (is_guest_mode(vcpu)) {
/* Even when running L2, the adjustment needs to apply to L1 */
to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
} else
trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
offset + adjustment);
}
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->ecx & (1 << (X86_FEATURE_VMX & 31)));
}
/*
* nested_vmx_allowed() checks whether a guest should be allowed to use VMX
* instructions and MSRs (i.e., nested VMX). Nested VMX is disabled for
* all guests if the "nested" module option is off, and can also be disabled
* for a single guest by disabling its VMX cpuid bit.
*/
static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
{
return nested && guest_cpuid_has_vmx(vcpu);
}
/*
* nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
* returned for the various VMX controls MSRs when nested VMX is enabled.
* The same values should also be used to verify that vmcs12 control fields are
* valid during nested entry from L1 to L2.
* Each of these control msrs has a low and high 32-bit half: A low bit is on
* if the corresponding bit in the (32-bit) control field *must* be on, and a
* bit in the high half is on if the corresponding bit in the control field
* may be on. See also vmx_control_verify().
*/
static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
{
/*
* Note that as a general rule, the high half of the MSRs (bits in
* the control fields which may be 1) should be initialized by the
* intersection of the underlying hardware's MSR (i.e., features which
* can be supported) and the list of features we want to expose -
* because they are known to be properly supported in our code.
* Also, usually, the low half of the MSRs (bits which must be 1) can
* be set to 0, meaning that L1 may turn off any of these bits. The
* reason is that if one of these bits is necessary, it will appear
* in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
* fields of vmcs01 and vmcs12, will keep it on in vmcs02 - and
* nested_vmx_exit_handled() will not pass the resulting exits to L1.
* These rules have exceptions below.
*/
/* pin-based controls */
rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high);
vmx->nested.nested_vmx_pinbased_ctls_low |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_pinbased_ctls_high &=
PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING |
PIN_BASED_VIRTUAL_NMIS;
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
if (vmx_cpu_uses_apicv(&vmx->vcpu))
vmx->nested.nested_vmx_pinbased_ctls_high |=
PIN_BASED_POSTED_INTR;
/* exit controls */
rdmsr(MSR_IA32_VMX_EXIT_CTLS,
vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
vmx->nested.nested_vmx_exit_ctls_low =
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
vmx->nested.nested_vmx_exit_ctls_high |=
VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
if (vmx_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
vmx->nested.nested_vmx_true_exit_ctls_low =
vmx->nested.nested_vmx_exit_ctls_low &
~VM_EXIT_SAVE_DEBUG_CONTROLS;
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
vmx->nested.nested_vmx_entry_ctls_low =
VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_entry_ctls_high &=
#ifdef CONFIG_X86_64
VM_ENTRY_IA32E_MODE |
#endif
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
if (vmx_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
vmx->nested.nested_vmx_true_entry_ctls_low =
vmx->nested.nested_vmx_entry_ctls_low &
~VM_ENTRY_LOAD_DEBUG_CONTROLS;
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
vmx->nested.nested_vmx_procbased_ctls_low =
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
vmx->nested.nested_vmx_procbased_ctls_high &=
CPU_BASED_VIRTUAL_INTR_PENDING |
CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
#ifdef CONFIG_X86_64
CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
#endif
CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
/*
* We can allow some features even when not supported by the
* hardware. For example, L1 can specify an MSR bitmap - and we
* can use it to avoid exits to L1 - even when L0 runs L2
* without MSR bitmaps.
*/
vmx->nested.nested_vmx_procbased_ctls_high |=
CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
CPU_BASED_USE_MSR_BITMAPS;
/* We support free control of CR3 access interception. */
vmx->nested.nested_vmx_true_procbased_ctls_low =
vmx->nested.nested_vmx_procbased_ctls_low &
~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
/* secondary cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
vmx->nested.nested_vmx_secondary_ctls_low,
vmx->nested.nested_vmx_secondary_ctls_high);
vmx->nested.nested_vmx_secondary_ctls_low = 0;
vmx->nested.nested_vmx_secondary_ctls_high &=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_PCOMMIT;
if (enable_ept) {
/* nested EPT: emulate EPT also to L1 */
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_ENABLE_EPT;
vmx->nested.nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
VMX_EPT_INVEPT_BIT;
vmx->nested.nested_vmx_ept_caps &= vmx_capability.ept;
/*
* For nested guests, we don't do anything specific
* for single context invalidation. Hence, only advertise
* support for global context invalidation.
*/
vmx->nested.nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT;
} else
vmx->nested.nested_vmx_ept_caps = 0;
if (enable_vpid)
vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT;
else
vmx->nested.nested_vmx_vpid_caps = 0;
if (enable_unrestricted_guest)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_UNRESTRICTED_GUEST;
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC,
vmx->nested.nested_vmx_misc_low,
vmx->nested.nested_vmx_misc_high);
vmx->nested.nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
vmx->nested.nested_vmx_misc_low |=
VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
VMX_MISC_ACTIVITY_HLT;
vmx->nested.nested_vmx_misc_high = 0;
}
static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
{
/*
* A control value is valid iff every bit that is 0 in 'high' is also 0
* in 'control', and every bit that is 1 in 'low' is also 1 in 'control'.
*/
return ((control & high) | low) == control;
}
static inline u64 vmx_control_msr(u32 low, u32 high)
{
return low | ((u64)high << 32);
}
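/*
 * Illustrative example (made-up values): with low = 0x16 and high = 0x401e,
 * vmx_control_verify() accepts control = 0x16 and control = 0x401e, but
 * rejects control = 0x14 (bit 1, required by 'low', is clear) and
 * control = 0x8016 (bit 15 is not permitted by 'high'). vmx_control_msr()
 * packs the same pair as 0x401e00000016.
 */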
/* Returns 0 on success, non-0 otherwise. */
static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
switch (msr_index) {
case MSR_IA32_VMX_BASIC:
/*
* This MSR reports some information about VMX support. We
* should return information about the VMX we emulate for the
* guest, and the VMCS structure we give it - not about the
* VMX support of the underlying hardware.
*/
*pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
(VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
break;
case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
case MSR_IA32_VMX_PINBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high);
break;
case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_true_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
break;
case MSR_IA32_VMX_PROCBASED_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high);
break;
case MSR_IA32_VMX_TRUE_EXIT_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_true_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
break;
case MSR_IA32_VMX_EXIT_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high);
break;
case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_true_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
break;
case MSR_IA32_VMX_ENTRY_CTLS:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high);
break;
case MSR_IA32_VMX_MISC:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_misc_low,
vmx->nested.nested_vmx_misc_high);
break;
/*
* These MSRs specify bits which the guest must keep fixed (on or off)
* while L1 is in VMXON mode (in L1's root mode, or running an L2).
* We picked the standard core2 setting.
*/
#define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
#define VMXON_CR4_ALWAYSON X86_CR4_VMXE
case MSR_IA32_VMX_CR0_FIXED0:
*pdata = VMXON_CR0_ALWAYSON;
break;
case MSR_IA32_VMX_CR0_FIXED1:
*pdata = -1ULL;
break;
case MSR_IA32_VMX_CR4_FIXED0:
*pdata = VMXON_CR4_ALWAYSON;
break;
case MSR_IA32_VMX_CR4_FIXED1:
*pdata = -1ULL;
break;
case MSR_IA32_VMX_VMCS_ENUM:
*pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
break;
case MSR_IA32_VMX_PROCBASED_CTLS2:
*pdata = vmx_control_msr(
vmx->nested.nested_vmx_secondary_ctls_low,
vmx->nested.nested_vmx_secondary_ctls_high);
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
/* Currently, no nested vpid support */
*pdata = vmx->nested.nested_vmx_ept_caps |
((u64)vmx->nested.nested_vmx_vpid_caps << 32);
break;
default:
return 1;
}
return 0;
}
/*
* Reads the MSR given by msr_info->index into msr_info->data.
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct shared_msr_entry *msr;
switch (msr_info->index) {
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
msr_info->data = vmcs_readl(GUEST_FS_BASE);
break;
case MSR_GS_BASE:
msr_info->data = vmcs_readl(GUEST_GS_BASE);
break;
case MSR_KERNEL_GS_BASE:
vmx_load_host_state(to_vmx(vcpu));
msr_info->data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
break;
#endif
case MSR_EFER:
return kvm_get_msr_common(vcpu, msr_info);
case MSR_IA32_TSC:
msr_info->data = guest_read_tsc(vcpu);
break;
case MSR_IA32_SYSENTER_CS:
msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
break;
case MSR_IA32_SYSENTER_EIP:
msr_info->data = vmcs_readl(GUEST_SYSENTER_EIP);
break;
case MSR_IA32_SYSENTER_ESP:
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
if (!vmx_mpx_supported())
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
case MSR_IA32_FEATURE_CONTROL:
if (!nested_vmx_allowed(vcpu))
return 1;
msr_info->data = to_vmx(vcpu)->nested.msr_ia32_feature_control;
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
if (!nested_vmx_allowed(vcpu))
return 1;
return vmx_get_vmx_msr(vcpu, msr_info->index, &msr_info->data);
case MSR_IA32_XSS:
if (!vmx_xsaves_supported())
return 1;
msr_info->data = vcpu->arch.ia32_xss;
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
return 1;
/* Otherwise falls through */
default:
msr = find_msr_entry(to_vmx(vcpu), msr_info->index);
if (msr) {
msr_info->data = msr->data;
break;
}
return kvm_get_msr_common(vcpu, msr_info);
}
return 0;
}
static void vmx_leave_nested(struct kvm_vcpu *vcpu);
/*
* Writes an msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
* Assumes vcpu_load() was already called.
*/
static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr;
int ret = 0;
u32 msr_index = msr_info->index;
u64 data = msr_info->data;
switch (msr_index) {
case MSR_EFER:
ret = kvm_set_msr_common(vcpu, msr_info);
break;
#ifdef CONFIG_X86_64
case MSR_FS_BASE:
vmx_segment_cache_clear(vmx);
vmcs_writel(GUEST_FS_BASE, data);
break;
case MSR_GS_BASE:
vmx_segment_cache_clear(vmx);
vmcs_writel(GUEST_GS_BASE, data);
break;
case MSR_KERNEL_GS_BASE:
vmx_load_host_state(vmx);
vmx->msr_guest_kernel_gs_base = data;
break;
#endif
case MSR_IA32_SYSENTER_CS:
vmcs_write32(GUEST_SYSENTER_CS, data);
break;
case MSR_IA32_SYSENTER_EIP:
vmcs_writel(GUEST_SYSENTER_EIP, data);
break;
case MSR_IA32_SYSENTER_ESP:
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
if (!vmx_mpx_supported())
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr_info);
break;
case MSR_IA32_CR_PAT:
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
return 1;
vmcs_write64(GUEST_IA32_PAT, data);
vcpu->arch.pat = data;
break;
}
ret = kvm_set_msr_common(vcpu, msr_info);
break;
case MSR_IA32_TSC_ADJUST:
ret = kvm_set_msr_common(vcpu, msr_info);
break;
case MSR_IA32_FEATURE_CONTROL:
if (!nested_vmx_allowed(vcpu) ||
(to_vmx(vcpu)->nested.msr_ia32_feature_control &
FEATURE_CONTROL_LOCKED && !msr_info->host_initiated))
return 1;
vmx->nested.msr_ia32_feature_control = data;
if (msr_info->host_initiated && data == 0)
vmx_leave_nested(vcpu);
break;
case MSR_IA32_VMX_BASIC ... MSR_IA32_VMX_VMFUNC:
return 1; /* they are read-only */
case MSR_IA32_XSS:
if (!vmx_xsaves_supported())
return 1;
/*
* As of Skylake the only bit defined here is bit 8, and
* KVM does not support it.
*/
if (data != 0)
return 1;
vcpu->arch.ia32_xss = data;
if (vcpu->arch.ia32_xss != host_xss)
add_atomic_switch_msr(vmx, MSR_IA32_XSS,
vcpu->arch.ia32_xss, host_xss);
else
clear_atomic_switch_msr(vmx, MSR_IA32_XSS);
break;
case MSR_TSC_AUX:
if (!guest_cpuid_has_rdtscp(vcpu))
return 1;
/* Check reserved bits: the upper 32 bits must be zero */
if ((data >> 32) != 0)
return 1;
/* Otherwise falls through */
default:
msr = find_msr_entry(vmx, msr_index);
if (msr) {
u64 old_msr_data = msr->data;
msr->data = data;
if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
preempt_disable();
ret = kvm_set_shared_msr(msr->index, msr->data,
msr->mask);
preempt_enable();
if (ret)
msr->data = old_msr_data;
}
break;
}
ret = kvm_set_msr_common(vcpu, msr_info);
}
return ret;
}
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
switch (reg) {
case VCPU_REGS_RSP:
vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
break;
case VCPU_REGS_RIP:
vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
break;
case VCPU_EXREG_PDPTR:
if (enable_ept)
ept_save_pdptrs(vcpu);
break;
default:
break;
}
}
static __init int cpu_has_kvm_support(void)
{
return cpu_has_vmx();
}
static __init int vmx_disabled_by_bios(void)
{
u64 msr;
rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
if (msr & FEATURE_CONTROL_LOCKED) {
/* launched w/ TXT and VMX disabled */
if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
&& tboot_enabled())
return 1;
/* launched w/o TXT and VMX only enabled w/ TXT */
if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
&& (msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
&& !tboot_enabled()) {
printk(KERN_WARNING "kvm: disable TXT in the BIOS or "
"activate TXT before enabling KVM\n");
return 1;
}
/* launched w/o TXT and VMX disabled */
if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
&& !tboot_enabled())
return 1;
}
return 0;
}
static void kvm_cpu_vmxon(u64 addr)
{
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&addr), "m"(addr)
: "memory", "cc");
}
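/*
 * Per-cpu enable callback: initialize the per-cpu VMCS and blocked-vCPU
 * lists, enable and lock the feature-control MSR if needed, set CR4.VMXE
 * and, in vmm_exclusive mode, enter VMX operation via VMXON.
 */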
static int hardware_enable(void)
{
int cpu = raw_smp_processor_id();
u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
u64 old, test_bits;
if (cr4_read_shadow() & X86_CR4_VMXE)
return -EBUSY;
INIT_LIST_HEAD(&per_cpu(loaded_vmcss_on_cpu, cpu));
INIT_LIST_HEAD(&per_cpu(blocked_vcpu_on_cpu, cpu));
spin_lock_init(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
/*
* Now we can enable the vmclear operation in kdump
* since the loaded_vmcss_on_cpu list on this cpu
* has been initialized.
*
* Although this cpu is not in VMX operation yet, enabling
* vmclear here is safe because the loaded_vmcss_on_cpu
* list is still empty.
*/
crash_enable_local_vmclear(cpu);
rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
test_bits = FEATURE_CONTROL_LOCKED;
test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
if (tboot_enabled())
test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
if ((old & test_bits) != test_bits) {
/* enable and lock */
wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
}
cr4_set_bits(X86_CR4_VMXE);
if (vmm_exclusive) {
kvm_cpu_vmxon(phys_addr);
ept_sync_global();
}
native_store_gdt(this_cpu_ptr(&host_gdt));
return 0;
}
static void vmclear_local_loaded_vmcss(void)
{
int cpu = raw_smp_processor_id();
struct loaded_vmcs *v, *n;
list_for_each_entry_safe(v, n, &per_cpu(loaded_vmcss_on_cpu, cpu),
loaded_vmcss_on_cpu_link)
__loaded_vmcs_clear(v);
}
/* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
* tricks.
*/
static void kvm_cpu_vmxoff(void)
{
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
}
static void hardware_disable(void)
{
if (vmm_exclusive) {
vmclear_local_loaded_vmcss();
kvm_cpu_vmxoff();
}
cr4_clear_bits(X86_CR4_VMXE);
}
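/*
 * Compute a usable control-field value: read the capability MSR, clear
 * optional bits the hardware cannot set (bit 0 in the high word) and force
 * bits it requires (bit 1 in the low word), then fail if a minimum-required
 * bit is still missing.
 */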
static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
u32 msr, u32 *result)
{
u32 vmx_msr_low, vmx_msr_high;
u32 ctl = ctl_min | ctl_opt;
rdmsr(msr, vmx_msr_low, vmx_msr_high);
ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
ctl |= vmx_msr_low; /* bit == 1 in low word ==> must be one */
/* Ensure minimum (required) set of control bits are supported. */
if (ctl_min & ~ctl)
return -EIO;
*result = ctl;
return 0;
}
static __init bool allow_1_setting(u32 msr, u32 ctl)
{
u32 vmx_msr_low, vmx_msr_high;
rdmsr(msr, vmx_msr_low, vmx_msr_high);
return vmx_msr_high & ctl;
}
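/*
 * Probe the VMX capability MSRs and fill *vmcs_conf with the control
 * settings this host will use; returns -EIO if a required control or the
 * basic VMCS parameters are unsupported.
 */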
static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
{
u32 vmx_msr_low, vmx_msr_high;
u32 min, opt, min2, opt2;
u32 _pin_based_exec_control = 0;
u32 _cpu_based_exec_control = 0;
u32 _cpu_based_2nd_exec_control = 0;
u32 _vmexit_control = 0;
u32 _vmentry_control = 0;
min = CPU_BASED_HLT_EXITING |
#ifdef CONFIG_X86_64
CPU_BASED_CR8_LOAD_EXITING |
CPU_BASED_CR8_STORE_EXITING |
#endif
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_USE_IO_BITMAPS |
CPU_BASED_MOV_DR_EXITING |
CPU_BASED_USE_TSC_OFFSETING |
CPU_BASED_MWAIT_EXITING |
CPU_BASED_MONITOR_EXITING |
CPU_BASED_INVLPG_EXITING |
CPU_BASED_RDPMC_EXITING;
opt = CPU_BASED_TPR_SHADOW |
CPU_BASED_USE_MSR_BITMAPS |
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
&_cpu_based_exec_control) < 0)
return -EIO;
#ifdef CONFIG_X86_64
if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
_cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
~CPU_BASED_CR8_STORE_EXITING;
#endif
if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
min2 = 0;
opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_WBINVD_EXITING |
SECONDARY_EXEC_ENABLE_VPID |
SECONDARY_EXEC_ENABLE_EPT |
SECONDARY_EXEC_UNRESTRICTED_GUEST |
SECONDARY_EXEC_PAUSE_LOOP_EXITING |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_ENABLE_INVPCID |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_SHADOW_VMCS |
SECONDARY_EXEC_XSAVES |
SECONDARY_EXEC_ENABLE_PML |
SECONDARY_EXEC_PCOMMIT |
SECONDARY_EXEC_TSC_SCALING;
if (adjust_vmx_controls(min2, opt2,
MSR_IA32_VMX_PROCBASED_CTLS2,
&_cpu_based_2nd_exec_control) < 0)
return -EIO;
}
#ifndef CONFIG_X86_64
if (!(_cpu_based_2nd_exec_control &
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
_cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
#endif
if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
_cpu_based_2nd_exec_control &= ~(
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
/*
 * CR3 accesses and invlpg don't need to cause VM exits
 * when EPT is enabled.
 */
_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_INVLPG_EXITING);
rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
vmx_capability.ept, vmx_capability.vpid);
}
min = VM_EXIT_SAVE_DEBUG_CONTROLS;
#ifdef CONFIG_X86_64
min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
#endif
opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT |
VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_CLEAR_BNDCFGS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
&_vmexit_control) < 0)
return -EIO;
min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
return -EIO;
if (!(_cpu_based_2nd_exec_control &
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
!(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
min = VM_ENTRY_LOAD_DEBUG_CONTROLS;
opt = VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
&_vmentry_control) < 0)
return -EIO;
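/*
 * Decode MSR_IA32_VMX_BASIC: bits 44:32 of the MSR (vmx_msr_high &
 * 0x1fff) give the VMCS region size, bit 48 (high-word bit 16) is the
 * 32-bit physical-address limitation, and bits 53:50 ((vmx_msr_high >>
 * 18) & 15) give the memory type for VMCS accesses, where 6 means
 * Write-Back.
 */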
rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
/* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
return -EIO;
#ifdef CONFIG_X86_64
/* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
if (vmx_msr_high & (1u<<16))
return -EIO;
#endif
/* Require Write-Back (WB) memory type for VMCS accesses. */
if (((vmx_msr_high >> 18) & 15) != 6)
return -EIO;
vmcs_conf->size = vmx_msr_high & 0x1fff;
vmcs_conf->order = get_order(vmcs_conf->size);
vmcs_conf->revision_id = vmx_msr_low;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
vmcs_conf->vmexit_ctrl = _vmexit_control;
vmcs_conf->vmentry_ctrl = _vmentry_control;
cpu_has_load_ia32_efer =
allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
VM_ENTRY_LOAD_IA32_EFER)
&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
VM_EXIT_LOAD_IA32_EFER);
cpu_has_load_perf_global_ctrl =
allow_1_setting(MSR_IA32_VMX_ENTRY_CTLS,
VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
&& allow_1_setting(MSR_IA32_VMX_EXIT_CTLS,
VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL);
/*
* Some CPUs support VM_ENTRY_(LOAD|SAVE)_IA32_PERF_GLOBAL_CTRL
* but due to the errata below it can't be used. The workaround is to
* use the MSR load mechanism to switch IA32_PERF_GLOBAL_CTRL.
*
* VM Exit May Incorrectly Clear IA32_PERF_GLOBAL_CTRL [34:32]
*
* AAK155 (model 26)
* AAP115 (model 30)
* AAT100 (model 37)
* BC86,AAY89,BD102 (model 44)
* BA97 (model 46)
*
*/
if (cpu_has_load_perf_global_ctrl && boot_cpu_data.x86 == 0x6) {
switch (boot_cpu_data.x86_model) {
case 26:
case 30:
case 37:
case 44:
case 46:
cpu_has_load_perf_global_ctrl = false;
printk_once(KERN_WARNING"kvm: VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL "
"does not work properly. Using workaround\n");
break;
default:
break;
}
}
if (cpu_has_xsaves)
rdmsrl(MSR_IA32_XSS, host_xss);
return 0;
}
static struct vmcs *alloc_vmcs_cpu(int cpu)
{
int node = cpu_to_node(cpu);
struct page *pages;
struct vmcs *vmcs;
pages = __alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
if (!pages)
return NULL;
vmcs = page_address(pages);
memset(vmcs, 0, vmcs_config.size);
vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
return vmcs;
}
static struct vmcs *alloc_vmcs(void)
{
return alloc_vmcs_cpu(raw_smp_processor_id());
}
static void free_vmcs(struct vmcs *vmcs)
{
free_pages((unsigned long)vmcs, vmcs_config.order);
}
/*
* Free a VMCS, but before that VMCLEAR it on the CPU where it was last loaded
*/
static void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
{
if (!loaded_vmcs->vmcs)
return;
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
}
static void free_kvm_area(void)
{
int cpu;
for_each_possible_cpu(cpu) {
free_vmcs(per_cpu(vmxarea, cpu));
per_cpu(vmxarea, cpu) = NULL;
}
}
static void init_vmcs_shadow_fields(void)
{
int i, j;
/* No checks for read only fields yet */
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
if (!vmx_mpx_supported())
continue;
break;
default:
break;
}
if (j < i)
shadow_read_write_fields[j] =
shadow_read_write_fields[i];
j++;
}
max_shadow_read_write_fields = j;
/* Shadowed fields are accessed by the guest without a VM exit. */
for (i = 0; i < max_shadow_read_write_fields; i++) {
clear_bit(shadow_read_write_fields[i],
vmx_vmwrite_bitmap);
clear_bit(shadow_read_write_fields[i],
vmx_vmread_bitmap);
}
for (i = 0; i < max_shadow_read_only_fields; i++)
clear_bit(shadow_read_only_fields[i],
vmx_vmread_bitmap);
}
static __init int alloc_kvm_area(void)
{
int cpu;
for_each_possible_cpu(cpu) {
struct vmcs *vmcs;
vmcs = alloc_vmcs_cpu(cpu);
if (!vmcs) {
free_kvm_area();
return -ENOMEM;
}
per_cpu(vmxarea, cpu) = vmcs;
}
return 0;
}
static bool emulation_required(struct kvm_vcpu *vcpu)
{
return emulate_invalid_guest_state && !guest_state_valid(vcpu);
}
static void fix_pmode_seg(struct kvm_vcpu *vcpu, int seg,
struct kvm_segment *save)
{
if (!emulate_invalid_guest_state) {
/*
* CS and SS RPL should be equal during guest entry according
* to VMX spec, but in reality it is not always so. Since vcpu
* is in the middle of the transition from real mode to
* protected mode it is safe to assume that RPL 0 is a good
* default value.
*/
if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
save->selector &= ~SEGMENT_RPL_MASK;
save->dpl = save->selector & SEGMENT_RPL_MASK;
save->s = 1;
}
vmx_set_segment(vcpu, save, seg);
}
static void enter_pmode(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
* Update the real mode segment cache. It may be stale if a segment
* register was written while the vcpu was in guest mode.
*/
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vmx->rmode.vm86_active = 0;
vmx_segment_cache_clear(vmx);
vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
flags = vmcs_readl(GUEST_RFLAGS);
flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
(vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
update_exception_bitmap(vcpu);
fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
}
static void fix_rmode_seg(int seg, struct kvm_segment *save)
{
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
struct kvm_segment var = *save;
var.dpl = 0x3;
if (seg == VCPU_SREG_CS)
var.type = 0x3;
if (!emulate_invalid_guest_state) {
var.selector = var.base >> 4;
var.base = var.base & 0xffff0;
var.limit = 0xffff;
var.g = 0;
var.db = 0;
var.present = 1;
var.s = 1;
var.l = 0;
var.unusable = 0;
var.type = 0x3;
var.avl = 0;
if (save->base & 0xf)
printk_once(KERN_WARNING "kvm: segment base is not "
"paragraph aligned when entering "
"protected mode (seg=%d)", seg);
}
vmcs_write16(sf->selector, var.selector);
vmcs_write32(sf->base, var.base);
vmcs_write32(sf->limit, var.limit);
vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(&var));
}
static void enter_rmode(struct kvm_vcpu *vcpu)
{
unsigned long flags;
struct vcpu_vmx *vmx = to_vmx(vcpu);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS);
vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS);
vmx->rmode.vm86_active = 1;
/*
* Very old userspace does not call KVM_SET_TSS_ADDR before entering
* vcpu. Warn the user that an update is overdue.
*/
if (!vcpu->kvm->arch.tss_addr)
printk_once(KERN_WARNING "kvm: KVM_SET_TSS_ADDR need to be "
"called before entering vcpu\n");
vmx_segment_cache_clear(vmx);
vmcs_writel(GUEST_TR_BASE, vcpu->kvm->arch.tss_addr);
vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
flags = vmcs_readl(GUEST_RFLAGS);
vmx->rmode.save_rflags = flags;
flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
vmcs_writel(GUEST_RFLAGS, flags);
vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
update_exception_bitmap(vcpu);
fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]);
fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]);
fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]);
fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]);
fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]);
fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]);
kvm_mmu_reset_context(vcpu);
}
static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
if (!msr)
return;
/*
* Force kernel_gs_base reloading before EFER changes, as control
* of this msr depends on is_long_mode().
*/
vmx_load_host_state(to_vmx(vcpu));
vcpu->arch.efer = efer;
if (efer & EFER_LMA) {
vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer;
} else {
vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
msr->data = efer & ~EFER_LME;
}
setup_msrs(vmx);
}
#ifdef CONFIG_X86_64
static void enter_lmode(struct kvm_vcpu *vcpu)
{
u32 guest_tr_ar;
vmx_segment_cache_clear(to_vmx(vcpu));
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
if ((guest_tr_ar & VMX_AR_TYPE_MASK) != VMX_AR_TYPE_BUSY_64_TSS) {
pr_debug_ratelimited("%s: tss fixup for long mode.\n",
__func__);
vmcs_write32(GUEST_TR_AR_BYTES,
(guest_tr_ar & ~VMX_AR_TYPE_MASK)
| VMX_AR_TYPE_BUSY_64_TSS);
}
vmx_set_efer(vcpu, vcpu->arch.efer | EFER_LMA);
}
static void exit_lmode(struct kvm_vcpu *vcpu)
{
vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);
vmx_set_efer(vcpu, vcpu->arch.efer & ~EFER_LMA);
}
#endif
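/*
 * Flush this vcpu's TLB entries: invalidate linear mappings tagged with
 * the vpid and, when EPT is in use and a root is loaded, also
 * invalidate combined/guest-physical mappings for the current EPTP.
 */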
static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
{
vpid_sync_context(vpid);
if (enable_ept) {
if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
return;
ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
}
}
static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
{
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid);
}
static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
}
static void vmx_decache_cr3(struct kvm_vcpu *vcpu)
{
if (enable_ept && is_paging(vcpu))
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
}
static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
if (!test_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty))
return;
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
}
}
static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
}
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_avail);
__set_bit(VCPU_EXREG_PDPTR,
(unsigned long *)&vcpu->arch.regs_dirty);
}
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
unsigned long cr0,
struct kvm_vcpu *vcpu)
{
if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
vmx_decache_cr3(vcpu);
if (!(cr0 & X86_CR0_PG)) {
/* From paging/starting to nonpaging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
} else if (!is_paging(vcpu)) {
/* From nonpaging to paging */
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
~(CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING));
vcpu->arch.cr0 = cr0;
vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
}
if (!(cr0 & X86_CR0_WP))
*hw_cr0 &= ~X86_CR0_WP;
}
static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long hw_cr0;
hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK);
if (enable_unrestricted_guest)
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
else {
hw_cr0 |= KVM_VM_CR0_ALWAYS_ON;
if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
enter_pmode(vcpu);
if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
enter_rmode(vcpu);
}
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
enter_lmode(vcpu);
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
exit_lmode(vcpu);
}
#endif
if (enable_ept)
ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
if (!vcpu->fpu_active)
hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0, hw_cr0);
vcpu->arch.cr0 = cr0;
/* depends on vcpu->arch.cr0 to be set to a new value */
vmx->emulation_required = emulation_required(vcpu);
}
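/*
 * EPTP layout as built below: bits 2:0 hold the EPT paging-structure
 * memory type (6 = WB), bits 5:3 the page-walk length minus one
 * (VMX_EPT_DEFAULT_GAW selects a 4-level walk), bit 6 enables
 * accessed/dirty flags, and the page-aligned high bits hold the
 * physical address of the EPT PML4 table.
 */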
static u64 construct_eptp(unsigned long root_hpa)
{
u64 eptp;
/* TODO: write the value read from the MSR */
eptp = VMX_EPT_DEFAULT_MT |
VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
if (enable_ept_ad_bits)
eptp |= VMX_EPT_AD_ENABLE_BIT;
eptp |= (root_hpa & PAGE_MASK);
return eptp;
}
static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
unsigned long guest_cr3;
u64 eptp;
guest_cr3 = cr3;
if (enable_ept) {
eptp = construct_eptp(cr3);
vmcs_write64(EPT_POINTER, eptp);
if (is_paging(vcpu) || is_guest_mode(vcpu))
guest_cr3 = kvm_read_cr3(vcpu);
else
guest_cr3 = vcpu->kvm->arch.ept_identity_map_addr;
ept_load_pdptrs(vcpu);
}
vmx_flush_tlb(vcpu);
vmcs_writel(GUEST_CR3, guest_cr3);
}
static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
/*
* Pass through host's Machine Check Enable value to hw_cr4, which
* is in force while we are in guest mode. Do not let guests control
* this bit, even if host CR4.MCE == 0.
*/
unsigned long hw_cr4 =
(cr4_read_shadow() & X86_CR4_MCE) |
(cr4 & ~X86_CR4_MCE) |
(to_vmx(vcpu)->rmode.vm86_active ?
KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
if (cr4 & X86_CR4_VMXE) {
/*
* To use VMXON (and later other VMX instructions), a guest
* must first be able to turn on cr4.VMXE (see handle_vmon()).
* So basically the check on whether to allow nested VMX
* is here.
*/
if (!nested_vmx_allowed(vcpu))
return 1;
}
if (to_vmx(vcpu)->nested.vmxon &&
((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
return 1;
vcpu->arch.cr4 = cr4;
if (enable_ept) {
if (!is_paging(vcpu)) {
hw_cr4 &= ~X86_CR4_PAE;
hw_cr4 |= X86_CR4_PSE;
} else if (!(cr4 & X86_CR4_PAE)) {
hw_cr4 &= ~X86_CR4_PAE;
}
}
if (!enable_unrestricted_guest && !is_paging(vcpu))
/*
* SMEP/SMAP are disabled if the CPU is in non-paging mode in
* hardware. However, KVM always uses paging mode when the
* unrestricted guest feature is not available.
* To emulate this behavior, SMEP/SMAP must be manually
* disabled when the guest switches to non-paging mode.
*/
hw_cr4 &= ~(X86_CR4_SMEP | X86_CR4_SMAP);
vmcs_writel(CR4_READ_SHADOW, cr4);
vmcs_writel(GUEST_CR4, hw_cr4);
return 0;
}
static void vmx_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 ar;
if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
*var = vmx->rmode.segs[seg];
if (seg == VCPU_SREG_TR
|| var->selector == vmx_read_guest_seg_selector(vmx, seg))
return;
var->base = vmx_read_guest_seg_base(vmx, seg);
var->selector = vmx_read_guest_seg_selector(vmx, seg);
return;
}
var->base = vmx_read_guest_seg_base(vmx, seg);
var->limit = vmx_read_guest_seg_limit(vmx, seg);
var->selector = vmx_read_guest_seg_selector(vmx, seg);
ar = vmx_read_guest_seg_ar(vmx, seg);
var->unusable = (ar >> 16) & 1;
var->type = ar & 15;
var->s = (ar >> 4) & 1;
var->dpl = (ar >> 5) & 3;
/*
* Some userspaces do not preserve the unusable property. Since a usable
* segment has to be present according to the VMX spec, we can use the
* present property to work around this userspace bug by making an
* unusable segment always non-present. vmx_segment_access_rights()
* already marks a non-present segment as unusable.
*/
var->present = !var->unusable;
var->avl = (ar >> 12) & 1;
var->l = (ar >> 13) & 1;
var->db = (ar >> 14) & 1;
var->g = (ar >> 15) & 1;
}
static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_segment s;
if (to_vmx(vcpu)->rmode.vm86_active) {
vmx_get_segment(vcpu, &s, seg);
return s.base;
}
return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
}
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (unlikely(vmx->rmode.vm86_active))
return 0;
else {
int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS);
return VMX_AR_DPL(ar);
}
}
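/*
 * VMX segment access-rights layout used by the pack/unpack helpers
 * here: type in bits 3:0, S in bit 4, DPL in bits 6:5, P in bit 7,
 * AVL in bit 12, L in bit 13, D/B in bit 14, G in bit 15 and the
 * "unusable" flag in bit 16.
 */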
static u32 vmx_segment_access_rights(struct kvm_segment *var)
{
u32 ar;
if (var->unusable || !var->present)
ar = 1 << 16;
else {
ar = var->type & 15;
ar |= (var->s & 1) << 4;
ar |= (var->dpl & 3) << 5;
ar |= (var->present & 1) << 7;
ar |= (var->avl & 1) << 12;
ar |= (var->l & 1) << 13;
ar |= (var->db & 1) << 14;
ar |= (var->g & 1) << 15;
}
return ar;
}
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
vmx_segment_cache_clear(vmx);
if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) {
vmx->rmode.segs[seg] = *var;
if (seg == VCPU_SREG_TR)
vmcs_write16(sf->selector, var->selector);
else if (var->s)
fix_rmode_seg(seg, &vmx->rmode.segs[seg]);
goto out;
}
vmcs_writel(sf->base, var->base);
vmcs_write32(sf->limit, var->limit);
vmcs_write16(sf->selector, var->selector);
/*
* Fix the "Accessed" bit in AR field of segment registers for older
* qemu binaries.
* IA32 arch specifies that at the time of processor reset the
* "Accessed" bit in the AR field of segment registers is 1. And qemu
* is setting it to 0 in the userland code. This causes invalid guest
* state vmexit when "unrestricted guest" mode is turned on.
* Fix for this setup issue in cpu_reset is being pushed in the qemu
* tree. Newer qemu binaries with that qemu fix would not need this
* kvm hack.
*/
if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
var->type |= 0x1; /* Accessed */
vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
out:
vmx->emulation_required = emulation_required(vcpu);
}
static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS);
*db = (ar >> 14) & 1;
*l = (ar >> 13) & 1;
}
static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
dt->address = vmcs_readl(GUEST_IDTR_BASE);
}
static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
vmcs_writel(GUEST_IDTR_BASE, dt->address);
}
static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
dt->address = vmcs_readl(GUEST_GDTR_BASE);
}
static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
vmcs_writel(GUEST_GDTR_BASE, dt->address);
}
static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_segment var;
u32 ar;
vmx_get_segment(vcpu, &var, seg);
var.dpl = 0x3;
if (seg == VCPU_SREG_CS)
var.type = 0x3;
ar = vmx_segment_access_rights(&var);
if (var.base != (var.selector << 4))
return false;
if (var.limit != 0xffff)
return false;
if (ar != 0xf3)
return false;
return true;
}
static bool code_segment_valid(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs;
unsigned int cs_rpl;
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
cs_rpl = cs.selector & SEGMENT_RPL_MASK;
if (cs.unusable)
return false;
if (~cs.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_ACCESSES_MASK))
return false;
if (!cs.s)
return false;
if (cs.type & VMX_AR_TYPE_WRITEABLE_MASK) {
if (cs.dpl > cs_rpl)
return false;
} else {
if (cs.dpl != cs_rpl)
return false;
}
if (!cs.present)
return false;
/*
 * TODO: Add Reserved field check; this'll require a new member in the
 * kvm_segment_field structure.
 */
return true;
}
static bool stack_segment_valid(struct kvm_vcpu *vcpu)
{
struct kvm_segment ss;
unsigned int ss_rpl;
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
ss_rpl = ss.selector & SEGMENT_RPL_MASK;
if (ss.unusable)
return true;
if (ss.type != 3 && ss.type != 7)
return false;
if (!ss.s)
return false;
if (ss.dpl != ss_rpl) /* DPL != RPL */
return false;
if (!ss.present)
return false;
return true;
}
static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
{
struct kvm_segment var;
unsigned int rpl;
vmx_get_segment(vcpu, &var, seg);
rpl = var.selector & SEGMENT_RPL_MASK;
if (var.unusable)
return true;
if (!var.s)
return false;
if (!var.present)
return false;
if (~var.type & (VMX_AR_TYPE_CODE_MASK|VMX_AR_TYPE_WRITEABLE_MASK)) {
if (var.dpl < rpl) /* DPL < RPL */
return false;
}
/*
 * TODO: Add other members to kvm_segment_field to allow checking for
 * other access rights flags.
 */
return true;
}
static bool tr_valid(struct kvm_vcpu *vcpu)
{
struct kvm_segment tr;
vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
if (tr.unusable)
return false;
if (tr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
return false;
if (!tr.present)
return false;
return true;
}
static bool ldtr_valid(struct kvm_vcpu *vcpu)
{
struct kvm_segment ldtr;
vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
if (ldtr.unusable)
return true;
if (ldtr.selector & SEGMENT_TI_MASK) /* TI = 1 */
return false;
if (ldtr.type != 2)
return false;
if (!ldtr.present)
return false;
return true;
}
static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs, ss;
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
return ((cs.selector & SEGMENT_RPL_MASK) ==
(ss.selector & SEGMENT_RPL_MASK));
}
/*
* Check if the guest state is valid. Returns true if valid, false if
* not. We assume that registers are always usable.
*/
static bool guest_state_valid(struct kvm_vcpu *vcpu)
{
if (enable_unrestricted_guest)
return true;
/* real mode guest state checks */
if (!is_protmode(vcpu) || (vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
return false;
if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
return false;
} else {
/* protected mode guest state checks */
if (!cs_ss_rpl_check(vcpu))
return false;
if (!code_segment_valid(vcpu))
return false;
if (!stack_segment_valid(vcpu))
return false;
if (!data_segment_valid(vcpu, VCPU_SREG_DS))
return false;
if (!data_segment_valid(vcpu, VCPU_SREG_ES))
return false;
if (!data_segment_valid(vcpu, VCPU_SREG_FS))
return false;
if (!data_segment_valid(vcpu, VCPU_SREG_GS))
return false;
if (!tr_valid(vcpu))
return false;
if (!ldtr_valid(vcpu))
return false;
}
/* TODO:
* - Add checks on RIP
* - Add checks on RFLAGS
*/
return true;
}
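/*
 * Lay out the real-mode TSS across three guest pages: the first page
 * holds the TSS proper with the I/O bitmap base pointing past the
 * interrupt-redirection map, the next two pages are cleared, and the
 * last byte is set to all ones, the terminator the SDM requires after
 * the I/O permission bitmap.
 */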
static int init_rmode_tss(struct kvm *kvm)
{
gfn_t fn;
u16 data = 0;
int idx, r;
idx = srcu_read_lock(&kvm->srcu);
fn = kvm->arch.tss_addr >> PAGE_SHIFT;
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
goto out;
data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
r = kvm_write_guest_page(kvm, fn++, &data,
TSS_IOPB_BASE_OFFSET, sizeof(u16));
if (r < 0)
goto out;
r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
if (r < 0)
goto out;
r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
if (r < 0)
goto out;
data = ~0;
r = kvm_write_guest_page(kvm, fn, &data,
RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
sizeof(u8));
out:
srcu_read_unlock(&kvm->srcu, idx);
return r;
}
static int init_rmode_identity_map(struct kvm *kvm)
{
int i, idx, r = 0;
pfn_t identity_map_pfn;
u32 tmp;
if (!enable_ept)
return 0;
/* Protect kvm->arch.ept_identity_pagetable_done. */
mutex_lock(&kvm->slots_lock);
if (likely(kvm->arch.ept_identity_pagetable_done))
goto out2;
identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
r = alloc_identity_pagetable(kvm);
if (r < 0)
goto out2;
idx = srcu_read_lock(&kvm->srcu);
r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
if (r < 0)
goto out;
/* Set up identity-mapping pagetable for EPT in real mode */
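/*
 * Each of the PT32_ENT_PER_PAGE entries below is a 4MB PSE
 * mapping of (i << 22), so this single page identity-maps the
 * low 4GiB of guest-physical address space.
 */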
for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
_PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
r = kvm_write_guest_page(kvm, identity_map_pfn,
&tmp, i * sizeof(tmp), sizeof(tmp));
if (r < 0)
goto out;
}
kvm->arch.ept_identity_pagetable_done = true;
out:
srcu_read_unlock(&kvm->srcu, idx);
out2:
mutex_unlock(&kvm->slots_lock);
return r;
}
static void seg_setup(int seg)
{
const struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
unsigned int ar;
vmcs_write16(sf->selector, 0);
vmcs_writel(sf->base, 0);
vmcs_write32(sf->limit, 0xffff);
ar = 0x93; /* present, DPL 0, S=1, type 3: read/write data, accessed */
if (seg == VCPU_SREG_CS)
ar |= 0x08; /* code segment */
vmcs_write32(sf->ar_bytes, ar);
}
static int alloc_apic_access_page(struct kvm *kvm)
{
struct page *page;
int r = 0;
mutex_lock(&kvm->slots_lock);
if (kvm->arch.apic_access_page_done)
goto out;
r = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
if (r)
goto out;
page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
if (is_error_page(page)) {
r = -EFAULT;
goto out;
}
/*
* Do not pin the page in memory, so that memory hot-unplug
* is able to migrate it.
*/
put_page(page);
kvm->arch.apic_access_page_done = true;
out:
mutex_unlock(&kvm->slots_lock);
return r;
}
static int alloc_identity_pagetable(struct kvm *kvm)
{
/* Called with kvm->slots_lock held. */
int r = 0;
BUG_ON(kvm->arch.ept_identity_pagetable_done);
r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
kvm->arch.ept_identity_map_addr, PAGE_SIZE);
return r;
}
static int allocate_vpid(void)
{
int vpid;
if (!enable_vpid)
return 0;
spin_lock(&vmx_vpid_lock);
vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
if (vpid < VMX_NR_VPIDS)
__set_bit(vpid, vmx_vpid_bitmap);
else
vpid = 0;
spin_unlock(&vmx_vpid_lock);
return vpid;
}
static void free_vpid(int vpid)
{
if (!enable_vpid || vpid == 0)
return;
spin_lock(&vmx_vpid_lock);
__clear_bit(vpid, vmx_vpid_bitmap);
spin_unlock(&vmx_vpid_lock);
}
#define MSR_TYPE_R 1
#define MSR_TYPE_W 2
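/*
 * The 4K MSR bitmap is split into four 1K regions, one bit per MSR:
 * 0x000 read-low (MSRs 0x00000000-0x00001fff), 0x400 read-high
 * (0xc0000000-0xc0001fff), 0x800 write-low and 0xc00 write-high.
 * A clear bit means the corresponding access does not cause a VM exit.
 */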
static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
u32 msr, int type)
{
int f = sizeof(unsigned long);
if (!cpu_has_vmx_msr_bitmap())
return;
/*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
* have the write-low and read-high bitmap offsets the wrong way round.
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
*/
if (msr <= 0x1fff) {
if (type & MSR_TYPE_R)
/* read-low */
__clear_bit(msr, msr_bitmap + 0x000 / f);
if (type & MSR_TYPE_W)
/* write-low */
__clear_bit(msr, msr_bitmap + 0x800 / f);
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
msr &= 0x1fff;
if (type & MSR_TYPE_R)
/* read-high */
__clear_bit(msr, msr_bitmap + 0x400 / f);
if (type & MSR_TYPE_W)
/* write-high */
__clear_bit(msr, msr_bitmap + 0xc00 / f);
}
}
static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
u32 msr, int type)
{
int f = sizeof(unsigned long);
if (!cpu_has_vmx_msr_bitmap())
return;
/*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
* have the write-low and read-high bitmap offsets the wrong way round.
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
*/
if (msr <= 0x1fff) {
if (type & MSR_TYPE_R)
/* read-low */
__set_bit(msr, msr_bitmap + 0x000 / f);
if (type & MSR_TYPE_W)
/* write-low */
__set_bit(msr, msr_bitmap + 0x800 / f);
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
msr &= 0x1fff;
if (type & MSR_TYPE_R)
/* read-high */
__set_bit(msr, msr_bitmap + 0x400 / f);
if (type & MSR_TYPE_W)
/* write-high */
__set_bit(msr, msr_bitmap + 0xc00 / f);
}
}
/*
* If an MSR is allowed by L0, we should check whether it is allowed by
* L1. The corresponding bit will be cleared unless both L0 and L1 allow
* it.
*/
*/
static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
unsigned long *msr_bitmap_nested,
u32 msr, int type)
{
int f = sizeof(unsigned long);
if (!cpu_has_vmx_msr_bitmap()) {
WARN_ON(1);
return;
}
/*
* See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
* have the write-low and read-high bitmap offsets the wrong way round.
* We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
*/
if (msr <= 0x1fff) {
if (type & MSR_TYPE_R &&
!test_bit(msr, msr_bitmap_l1 + 0x000 / f))
/* read-low */
__clear_bit(msr, msr_bitmap_nested + 0x000 / f);
if (type & MSR_TYPE_W &&
!test_bit(msr, msr_bitmap_l1 + 0x800 / f))
/* write-low */
__clear_bit(msr, msr_bitmap_nested + 0x800 / f);
} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
msr &= 0x1fff;
if (type & MSR_TYPE_R &&
!test_bit(msr, msr_bitmap_l1 + 0x400 / f))
/* read-high */
__clear_bit(msr, msr_bitmap_nested + 0x400 / f);
if (type & MSR_TYPE_W &&
!test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
/* write-high */
__clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
}
}
static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
{
if (!longmode_only)
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
msr, MSR_TYPE_R | MSR_TYPE_W);
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
msr, MSR_TYPE_R | MSR_TYPE_W);
}
static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
{
__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_R);
__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_R);
}
static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
{
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_R);
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_R);
}
static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
{
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_W);
__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_W);
}
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
{
return enable_apicv && lapic_in_kernel(vcpu);
}
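/*
 * The guest interrupt status packs SVI in the high byte and RVI in the
 * low byte; the helper below syncs L1's posted-interrupt requests into
 * the vIRR and raises RVI to the highest pending vector if needed.
 */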
static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int max_irr;
void *vapic_page;
u16 status;
if (vmx->nested.pi_desc &&
vmx->nested.pi_pending) {
vmx->nested.pi_pending = false;
if (!pi_test_and_clear_on(vmx->nested.pi_desc))
return 0;
max_irr = find_last_bit(
(unsigned long *)vmx->nested.pi_desc->pir, 256);
if (max_irr == 256)
return 0;
vapic_page = kmap(vmx->nested.virtual_apic_page);
if (!vapic_page) {
WARN_ON(1);
return -ENOMEM;
}
__kvm_apic_update_irr(vmx->nested.pi_desc->pir, vapic_page);
kunmap(vmx->nested.virtual_apic_page);
status = vmcs_read16(GUEST_INTR_STATUS);
if ((u8)max_irr > ((u8)status & 0xff)) {
status &= ~0xff;
status |= (u8)max_irr;
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
return 0;
}
static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SMP
if (vcpu->mode == IN_GUEST_MODE) {
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
* Currently, we don't support urgent interrupts; all
* interrupts are recognized as non-urgent, so we cannot
* post interrupts when 'SN' is set.
*
* If the vcpu is in guest mode, it is running instead of
* being scheduled out and waiting in the run queue; that
* is currently the only case in which 'SN' is set, so
* warn if it is.
*/
WARN_ON_ONCE(pi_test_sn(&vmx->pi_desc));
apic->send_IPI_mask(get_cpu_mask(vcpu->cpu),
POSTED_INTR_VECTOR);
return true;
}
#endif
return false;
}
static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (is_guest_mode(vcpu) &&
vector == vmx->nested.posted_intr_nv) {
/* the PIR and ON have been set by L1. */
kvm_vcpu_trigger_posted_interrupt(vcpu);
/*
* If a posted intr is not recognized by hardware,
* we will accomplish it in the next vmentry.
*/
vmx->nested.pi_pending = true;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
return -1;
}
/*
* Send an interrupt to a vcpu via the posted-interrupt mechanism.
* 1. If the target vcpu is running (non-root mode), send a posted
* interrupt notification to it and hardware will sync PIR to vIRR
* atomically.
* 2. If the target vcpu isn't running (root mode), kick it to pick up
* the interrupt from PIR on the next vmentry.
*/
static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int r;
r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
if (!r)
return;
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
return;
r = pi_test_and_set_on(&vmx->pi_desc);
kvm_make_request(KVM_REQ_EVENT, vcpu);
if (r || !kvm_vcpu_trigger_posted_interrupt(vcpu))
kvm_vcpu_kick(vcpu);
}
static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!pi_test_and_clear_on(&vmx->pi_desc))
return;
kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
}
static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
{
return;
}
/*
* Set up the vmcs's constant host-state fields, i.e., host-state fields that
* will not change in the lifetime of the guest.
* Note that host-state that does change is set elsewhere. E.g., host-state
* that is set differently for each CPU is set in vmx_vcpu_load(), not here.
*/
static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
{
u32 low32, high32;
unsigned long tmpl;
struct desc_ptr dt;
unsigned long cr4;
vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
/* Save the most likely value for this task's CR4 in the VMCS. */
cr4 = cr4_read_shadow();
vmcs_writel(HOST_CR4, cr4); /* 22.2.3, 22.2.5 */
vmx->host_state.vmcs_host_cr4 = cr4;
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
#ifdef CONFIG_X86_64
/*
* Load null selectors, so we can avoid reloading them in
* __vmx_load_host_state(), in case userspace uses the null selectors
* too (the expected case).
*/
vmcs_write16(HOST_DS_SELECTOR, 0);
vmcs_write16(HOST_ES_SELECTOR, 0);
#else
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#endif
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
native_store_idt(&dt);
vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
vmx->host_idt_base = dt.address;
vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
rdmsrl(MSR_IA32_SYSENTER_EIP, tmpl);
vmcs_writel(HOST_IA32_SYSENTER_EIP, tmpl); /* 22.2.3 */
if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
rdmsr(MSR_IA32_CR_PAT, low32, high32);
vmcs_write64(HOST_IA32_PAT, low32 | ((u64) high32 << 32));
}
}
static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
{
vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
if (enable_ept)
vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
if (is_guest_mode(&vmx->vcpu))
vmx->vcpu.arch.cr4_guest_owned_bits &=
~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask;
vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
}
static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
{
u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
if (!vmx_cpu_uses_apicv(&vmx->vcpu))
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
return pin_based_exec_ctrl;
}
static u32 vmx_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)
exec_control &= ~CPU_BASED_MOV_DR_EXITING;
if (!cpu_need_tpr_shadow(&vmx->vcpu)) {
exec_control &= ~CPU_BASED_TPR_SHADOW;
#ifdef CONFIG_X86_64
exec_control |= CPU_BASED_CR8_STORE_EXITING |
CPU_BASED_CR8_LOAD_EXITING;
#endif
}
if (!enable_ept)
exec_control |= CPU_BASED_CR3_STORE_EXITING |
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_INVLPG_EXITING;
return exec_control;
}
static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
{
u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
if (!cpu_need_virtualize_apic_accesses(&vmx->vcpu))
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
if (vmx->vpid == 0)
exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
if (!enable_ept) {
exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
enable_unrestricted_guest = 0;
/* Enabling INVPCID for non-EPT guests may cause a performance regression. */
exec_control &= ~SECONDARY_EXEC_ENABLE_INVPCID;
}
if (!enable_unrestricted_guest)
exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
if (!ple_gap)
exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
if (!vmx_cpu_uses_apicv(&vmx->vcpu))
exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
/*
 * SECONDARY_EXEC_SHADOW_VMCS is enabled when L1 executes VMPTRLD
 * (handle_vmptrld). We can NOT enable shadow_vmcs here because we
 * don't yet have a current VMCS12.
 */
exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
if (!enable_pml)
exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
/* Currently, we allow the L1 guest to run the pcommit instruction directly. */
exec_control &= ~SECONDARY_EXEC_PCOMMIT;
return exec_control;
}
static void ept_set_mmio_spte_mask(void)
{
/*
* EPT Misconfigurations can be generated if the value of bits 2:0
* of an EPT paging-structure entry is 110b (write/execute).
* Also, the magic bits (0x3ull << 62) are set to quickly identify an
* MMIO spte.
*/
kvm_mmu_set_mmio_spte_mask((0x3ull << 62) | 0x6ull);
}
#define VMX_XSS_EXIT_BITMAP 0
/*
* Sets up the vmcs for emulated real mode.
*/
static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
{
#ifdef CONFIG_X86_64
unsigned long a;
#endif
int i;
/* I/O */
vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
if (enable_shadow_vmcs) {
vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
}
if (cpu_has_vmx_msr_bitmap())
vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
/* Control */
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
if (cpu_has_secondary_exec_ctrls())
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
vmx_secondary_exec_control(vmx));
if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
vmcs_write64(EOI_EXIT_BITMAP0, 0);
vmcs_write64(EOI_EXIT_BITMAP1, 0);
vmcs_write64(EOI_EXIT_BITMAP2, 0);
vmcs_write64(EOI_EXIT_BITMAP3, 0);
vmcs_write16(GUEST_INTR_STATUS, 0);
vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
}
if (ple_gap) {
vmcs_write32(PLE_GAP, ple_gap);
vmx->ple_window = ple_window;
vmx->ple_window_dirty = true;
}
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
vmcs_write32(CR3_TARGET_COUNT, 0); /* 22.2.1 */
vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmx_set_constant_host_state(vmx);
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
rdmsrl(MSR_GS_BASE, a);
vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
#else
vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
#endif
vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
u32 index = vmx_msr_index[i];
u32 data_low, data_high;
int j = vmx->nmsrs;
if (rdmsr_safe(index, &data_low, &data_high) < 0)
continue;
if (wrmsr_safe(index, data_low, data_high) < 0)
continue;
vmx->guest_msrs[j].index = i;
vmx->guest_msrs[j].data = 0;
vmx->guest_msrs[j].mask = -1ull;
++vmx->nmsrs;
}
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
/* 22.2.1, 20.8.1 */
vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl);
vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
set_cr4_guest_host_mask(vmx);
if (vmx_xsaves_supported())
vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
return 0;
}
static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct msr_data apic_base_msr;
u64 cr0;
vmx->rmode.vm86_active = 0;
vmx->soft_vnmi_blocked = 0;
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
kvm_set_cr8(vcpu, 0);
if (!init_event) {
apic_base_msr.data = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(vcpu))
apic_base_msr.data |= MSR_IA32_APICBASE_BSP;
apic_base_msr.host_initiated = true;
kvm_set_apic_base(vcpu, &apic_base_msr);
}
vmx_segment_cache_clear(vmx);
seg_setup(VCPU_SREG_CS);
vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
vmcs_write32(GUEST_CS_BASE, 0xffff0000);
seg_setup(VCPU_SREG_DS);
seg_setup(VCPU_SREG_ES);
seg_setup(VCPU_SREG_FS);
seg_setup(VCPU_SREG_GS);
seg_setup(VCPU_SREG_SS);
vmcs_write16(GUEST_TR_SELECTOR, 0);
vmcs_writel(GUEST_TR_BASE, 0);
vmcs_write32(GUEST_TR_LIMIT, 0xffff);
vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
vmcs_write16(GUEST_LDTR_SELECTOR, 0);
vmcs_writel(GUEST_LDTR_BASE, 0);
vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
if (!init_event) {
vmcs_write32(GUEST_SYSENTER_CS, 0);
vmcs_writel(GUEST_SYSENTER_ESP, 0);
vmcs_writel(GUEST_SYSENTER_EIP, 0);
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}
vmcs_writel(GUEST_RFLAGS, 0x02);
kvm_rip_write(vcpu, 0xfff0);
vmcs_writel(GUEST_GDTR_BASE, 0);
vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
vmcs_writel(GUEST_IDTR_BASE, 0);
vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
setup_msrs(vmx);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0); /* 22.2.1 */
if (cpu_has_vmx_tpr_shadow() && !init_event) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
if (cpu_need_tpr_shadow(vcpu))
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
__pa(vcpu->arch.apic->regs));
vmcs_write32(TPR_THRESHOLD, 0);
}
kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
if (vmx_cpu_uses_apicv(vcpu))
memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
if (vmx->vpid != 0)
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
vmx_set_cr0(vcpu, cr0); /* enter rmode */
vmx->vcpu.arch.cr0 = cr0;
vmx_set_cr4(vcpu, 0);
vmx_set_efer(vcpu, 0);
vmx_fpu_activate(vcpu);
update_exception_bitmap(vcpu);
vpid_sync_context(vmx->vpid);
}
/*
* In nested virtualization, check if L1 asked to exit on external interrupts.
* For most existing hypervisors, this will always return true.
*/
static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
{
return get_vmcs12(vcpu)->pin_based_vm_exec_control &
PIN_BASED_EXT_INTR_MASK;
}
/*
* In nested virtualization, check if L1 has set
* VM_EXIT_ACK_INTR_ON_EXIT
*/
static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
{
return get_vmcs12(vcpu)->vm_exit_controls &
VM_EXIT_ACK_INTR_ON_EXIT;
}
static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
{
return get_vmcs12(vcpu)->pin_based_vm_exec_control &
PIN_BASED_NMI_EXITING;
}
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
if (!cpu_has_virtual_nmis() ||
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu);
return;
}
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
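/*
 * The VM-entry interruption-information field written below encodes
 * the vector in bits 7:0, the event type in bits 10:8 (external or
 * software interrupt here) and the valid flag in bit 31.
 */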
static void vmx_inject_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
uint32_t intr;
int irq = vcpu->arch.interrupt.nr;
trace_kvm_inj_virq(irq);
++vcpu->stat.irq_injections;
if (vmx->rmode.vm86_active) {
int inc_eip = 0;
if (vcpu->arch.interrupt.soft)
inc_eip = vcpu->arch.event_exit_inst_len;
if (kvm_inject_realmode_interrupt(vcpu, irq, inc_eip) != EMULATE_DONE)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
intr = irq | INTR_INFO_VALID_MASK;
if (vcpu->arch.interrupt.soft) {
intr |= INTR_TYPE_SOFT_INTR;
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmx->vcpu.arch.event_exit_inst_len);
} else
intr |= INTR_TYPE_EXT_INTR;
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
}
static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (is_guest_mode(vcpu))
return;
if (!cpu_has_virtual_nmis()) {
/*
* Tracking the NMI-blocked state in software is built upon
* finding the next open IRQ window. This, in turn, depends on
* well-behaving guests: They have to keep IRQs disabled at
* least as long as the NMI handler runs. Otherwise we may
* cause NMI nesting, maybe breaking the guest. But as this is
* highly unlikely, we can live with the residual risk.
*/
vmx->soft_vnmi_blocked = 1;
vmx->vnmi_blocked_time = 0;
}
++vcpu->stat.nmi_injections;
vmx->nmi_known_unmasked = false;
if (vmx->rmode.vm86_active) {
if (kvm_inject_realmode_interrupt(vcpu, NMI_VECTOR, 0) != EMULATE_DONE)
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return;
}
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
}
static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
{
if (!cpu_has_virtual_nmis())
return to_vmx(vcpu)->soft_vnmi_blocked;
if (to_vmx(vcpu)->nmi_known_unmasked)
return false;
return vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
}
static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!cpu_has_virtual_nmis()) {
if (vmx->soft_vnmi_blocked != masked) {
vmx->soft_vnmi_blocked = masked;
vmx->vnmi_blocked_time = 0;
}
} else {
vmx->nmi_known_unmasked = !masked;
if (masked)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
else
vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
}
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
return 0;
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI));
}
static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
{
return (!to_vmx(vcpu)->nested.nested_run_pending &&
vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
}
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
int ret;
ret = x86_set_memory_region(kvm, TSS_PRIVATE_MEMSLOT, addr,
PAGE_SIZE * 3);
if (ret)
return ret;
kvm->arch.tss_addr = addr;
return init_rmode_tss(kvm);
}
static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
{
switch (vec) {
case BP_VECTOR:
/*
* Update instruction length as we may reinject the exception
* from user space while in guest debugging mode.
*/
to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
return false;
/* fall through */
case DB_VECTOR:
if (vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
return false;
/* fall through */
case DE_VECTOR:
case OF_VECTOR:
case BR_VECTOR:
case UD_VECTOR:
case DF_VECTOR:
case SS_VECTOR:
case GP_VECTOR:
case MF_VECTOR:
return true;
}
return false;
}
static int handle_rmode_exception(struct kvm_vcpu *vcpu,
int vec, u32 err_code)
{
/*
* An instruction with the address-size override prefix (opcode 0x67)
* causes a #SS fault with error code 0 in VM86 mode.
*/
if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0) {
if (emulate_instruction(vcpu, 0) == EMULATE_DONE) {
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
return kvm_vcpu_halt(vcpu);
}
return 1;
}
return 0;
}
/*
* Forward all other exceptions that are valid in real mode.
* FIXME: Breaks guest debugging in real mode, needs to be fixed with
* the required debugging infrastructure rework.
*/
kvm_queue_exception(vcpu, vec);
return 1;
}
/*
* Trigger machine check on the host. We assume all the MSRs are already set up
* by the CPU and that we still run on the same CPU as the MCE occurred on.
* We pass a fake environment to the machine check handler because we want
* the guest to be always treated like user space, no matter what context
* it used internally.
*/
static void kvm_machine_check(void)
{
#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
struct pt_regs regs = {
.cs = 3, /* Fake ring 3 no matter what the guest ran on */
.flags = X86_EFLAGS_IF,
};
do_machine_check(&regs, 0);
#endif
}
static int handle_machine_check(struct kvm_vcpu *vcpu)
{
/* already handled by vcpu_run */
return 1;
}
static int handle_exception(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 intr_info, ex_no, error_code;
unsigned long cr2, rip, dr6;
u32 vect_info;
enum emulation_result er;
vect_info = vmx->idt_vectoring_info;
intr_info = vmx->exit_intr_info;
if (is_machine_check(intr_info))
return handle_machine_check(vcpu);
if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
return 1; /* already handled by vmx_vcpu_run() */
if (is_no_device(intr_info)) {
vmx_fpu_activate(vcpu);
return 1;
}
if (is_invalid_opcode(intr_info)) {
if (is_guest_mode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
error_code = 0;
if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
/*
* The #PF with PFEC.RSVD = 1 indicates the guest is accessing
* MMIO, it is better to report an internal error.
* See the comments in vmx_handle_exit.
*/
if ((vect_info & VECTORING_INFO_VALID_MASK) &&
!(is_page_fault(intr_info) && !(error_code & PFERR_RSVD_MASK))) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
vcpu->run->internal.ndata = 3;
vcpu->run->internal.data[0] = vect_info;
vcpu->run->internal.data[1] = intr_info;
vcpu->run->internal.data[2] = error_code;
return 0;
}
if (is_page_fault(intr_info)) {
/* EPT won't cause page fault directly */
BUG_ON(enable_ept);
cr2 = vmcs_readl(EXIT_QUALIFICATION);
trace_kvm_page_fault(cr2, error_code);
if (kvm_event_needs_reinjection(vcpu))
kvm_mmu_unprotect_page_virt(vcpu, cr2);
return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
}
ex_no = intr_info & INTR_INFO_VECTOR_MASK;
if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no))
return handle_rmode_exception(vcpu, ex_no, error_code);
switch (ex_no) {
case DB_VECTOR:
dr6 = vmcs_readl(EXIT_QUALIFICATION);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= dr6 | DR6_RTM;
if (!(dr6 & ~DR6_RESERVED)) /* icebp */
skip_emulated_instruction(vcpu);
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
/* fall through */
case BP_VECTOR:
/*
* Update instruction length as we may reinject #BP from
* user space while in guest debugging mode. Reading it for
* #DB as well causes no harm, it is not used in that case.
*/
vmx->vcpu.arch.event_exit_inst_len =
vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
kvm_run->exit_reason = KVM_EXIT_DEBUG;
rip = kvm_rip_read(vcpu);
kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
kvm_run->debug.arch.exception = ex_no;
break;
default:
kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
kvm_run->ex.exception = ex_no;
kvm_run->ex.error_code = error_code;
break;
}
return 0;
}
static int handle_external_interrupt(struct kvm_vcpu *vcpu)
{
++vcpu->stat.irq_exits;
return 1;
}
static int handle_triple_fault(struct kvm_vcpu *vcpu)
{
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
}
static int handle_io(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
int size, in, string;
unsigned port;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
string = (exit_qualification & 16) != 0;
in = (exit_qualification & 8) != 0;
++vcpu->stat.io_exits;
if (string || in)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
skip_emulated_instruction(vcpu);
return kvm_fast_pio_out(vcpu, size, port);
}
static void
vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
/*
* Patch in the VMCALL instruction (opcode 0f 01 c1):
*/
hypercall[0] = 0x0f;
hypercall[1] = 0x01;
hypercall[2] = 0xc1;
}
static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
{
unsigned long always_on = VMXON_CR0_ALWAYSON;
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_UNRESTRICTED_GUEST &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
always_on &= ~(X86_CR0_PE | X86_CR0_PG);
return (val & always_on) == always_on;
}
/* called to set cr0 as appropriate for a mov-to-cr0 exit. */
static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
{
if (is_guest_mode(vcpu)) {
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned long orig_val = val;
/*
* We get here when L2 changed cr0 in a way that did not change
* any of L1's shadowed bits (see nested_vmx_exit_handled_cr),
* but did change L0 shadowed bits. So we first calculate the
* effective cr0 value that L1 would like to write into the
* hardware. It consists of the L2-owned bits from the new
* value combined with the L1-owned bits from L1's guest_cr0.
*/
val = (val & ~vmcs12->cr0_guest_host_mask) |
(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
if (!nested_cr0_valid(vcpu, val))
return 1;
if (kvm_set_cr0(vcpu, val))
return 1;
vmcs_writel(CR0_READ_SHADOW, orig_val);
return 0;
} else {
if (to_vmx(vcpu)->nested.vmxon &&
((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
return 1;
return kvm_set_cr0(vcpu, val);
}
}
static int handle_set_cr4(struct kvm_vcpu *vcpu, unsigned long val)
{
if (is_guest_mode(vcpu)) {
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
unsigned long orig_val = val;
/* analogously to handle_set_cr0 */
val = (val & ~vmcs12->cr4_guest_host_mask) |
(vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask);
if (kvm_set_cr4(vcpu, val))
return 1;
vmcs_writel(CR4_READ_SHADOW, orig_val);
return 0;
} else
return kvm_set_cr4(vcpu, val);
}
/* called to set cr0 as appropriate for a clts instruction exit. */
static void handle_clts(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu)) {
/*
* We get here when L2 did CLTS, and L1 didn't shadow CR0.TS
* but we did (!fpu_active). We need to keep GUEST_CR0.TS on,
* just pretend it's off (also in arch.cr0 for fpu_activate).
*/
vmcs_writel(CR0_READ_SHADOW,
vmcs_readl(CR0_READ_SHADOW) & ~X86_CR0_TS);
vcpu->arch.cr0 &= ~X86_CR0_TS;
} else
vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
}
static int handle_cr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification, val;
int cr;
int reg;
int err;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
cr = exit_qualification & 15;
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
val = kvm_register_readl(vcpu, reg);
trace_kvm_cr_write(cr, val);
switch (cr) {
case 0:
err = handle_set_cr0(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 3:
err = kvm_set_cr3(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 4:
err = handle_set_cr4(vcpu, val);
kvm_complete_insn_gp(vcpu, err);
return 1;
case 8: {
u8 cr8_prev = kvm_get_cr8(vcpu);
u8 cr8 = (u8)val;
err = kvm_set_cr8(vcpu, cr8);
kvm_complete_insn_gp(vcpu, err);
if (lapic_in_kernel(vcpu))
return 1;
if (cr8_prev <= cr8)
return 1;
vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
}
break;
case 2: /* clts */
handle_clts(vcpu);
trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
skip_emulated_instruction(vcpu);
vmx_fpu_activate(vcpu);
return 1;
case 1: /*mov from cr*/
switch (cr) {
case 3:
val = kvm_read_cr3(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
skip_emulated_instruction(vcpu);
return 1;
case 8:
val = kvm_get_cr8(vcpu);
kvm_register_write(vcpu, reg, val);
trace_kvm_cr_read(cr, val);
skip_emulated_instruction(vcpu);
return 1;
}
break;
case 3: /* lmsw */
val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
kvm_lmsw(vcpu, val);
skip_emulated_instruction(vcpu);
return 1;
default:
break;
}
vcpu->run->exit_reason = 0;
vcpu_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
(int)(exit_qualification >> 4) & 3, cr);
return 0;
}
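/*
 * Handle debug-register accesses (MOV DR exits): validate the access,
 * emulate the DR7 general-detect trap if needed, and otherwise transfer
 * the value between the guest register and the debug register.
 */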
static int handle_dr(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
int dr, dr7, reg;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
/* First, if DR does not exist, trigger UD */
if (!kvm_require_dr(vcpu, dr))
return 1;
/* Do not handle if CPL > 0; a #GP will be triggered on re-entry */
if (!kvm_require_cpl(vcpu, 0))
return 1;
dr7 = vmcs_readl(GUEST_DR7);
if (dr7 & DR7_GD) {
/*
* As the vm-exit takes precedence over the debug trap, we
* need to emulate the latter, either for the host or the
* guest debugging itself.
*/
if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
vcpu->run->debug.arch.dr7 = dr7;
vcpu->run->debug.arch.pc = kvm_get_linear_rip(vcpu);
vcpu->run->debug.arch.exception = DB_VECTOR;
vcpu->run->exit_reason = KVM_EXIT_DEBUG;
return 0;
} else {
vcpu->arch.dr6 &= ~15;
vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
kvm_queue_exception(vcpu, DB_VECTOR);
return 1;
}
}
if (vcpu->guest_debug == 0) {
u32 cpu_based_vm_exec_control;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
/*
* No more DR vmexits; force a reload of the debug registers
* and reenter on this instruction. The next vmexit will
* retrieve the full state of the debug registers.
*/
vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
return 1;
}
reg = DEBUG_REG_ACCESS_REG(exit_qualification);
if (exit_qualification & TYPE_MOV_FROM_DR) {
unsigned long val;
if (kvm_get_dr(vcpu, dr, &val))
return 1;
kvm_register_write(vcpu, reg, val);
} else
if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
return 1;
skip_emulated_instruction(vcpu);
return 1;
}
static u64 vmx_get_dr6(struct kvm_vcpu *vcpu)
{
return vcpu->arch.dr6;
}
static void vmx_set_dr6(struct kvm_vcpu *vcpu, unsigned long val)
{
}
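/*
 * Called when the guest may have modified the debug registers while
 * MOV DR exiting was disabled: save the current hardware values into
 * vcpu->arch and re-enable MOV DR exiting.
 */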
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
get_debugreg(vcpu->arch.dr6, 6);
vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
}
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
vmcs_writel(GUEST_DR7, val);
}
static int handle_cpuid(struct kvm_vcpu *vcpu)
{
kvm_emulate_cpuid(vcpu);
return 1;
}
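/*
 * Emulate RDMSR: the MSR index is taken from ECX and the 64-bit result
 * is returned in EDX:EAX; a failed read injects #GP.
 */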
static int handle_rdmsr(struct kvm_vcpu *vcpu)
{
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
struct msr_data msr_info;
msr_info.index = ecx;
msr_info.host_initiated = false;
if (vmx_get_msr(vcpu, &msr_info)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_read(ecx, msr_info.data);
/* FIXME: handling of bits 32:63 of rax, rdx */
vcpu->arch.regs[VCPU_REGS_RAX] = msr_info.data & -1u;
vcpu->arch.regs[VCPU_REGS_RDX] = (msr_info.data >> 32) & -1u;
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_wrmsr(struct kvm_vcpu *vcpu)
{
struct msr_data msr;
u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
| ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
if (kvm_set_msr(vcpu, &msr) != 0) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(vcpu, 0);
return 1;
}
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
{
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 1;
}
static int handle_interrupt_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
/* clear pending irq */
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
kvm_make_request(KVM_REQ_EVENT, vcpu);
++vcpu->stat.irq_window_exits;
return 1;
}
static int handle_halt(struct kvm_vcpu *vcpu)
{
return kvm_emulate_halt(vcpu);
}
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
kvm_emulate_hypercall(vcpu);
return 1;
}
static int handle_invd(struct kvm_vcpu *vcpu)
{
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_invlpg(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
kvm_mmu_invlpg(vcpu, exit_qualification);
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_rdpmc(struct kvm_vcpu *vcpu)
{
int err;
err = kvm_rdpmc(vcpu);
kvm_complete_insn_gp(vcpu, err);
return 1;
}
static int handle_wbinvd(struct kvm_vcpu *vcpu)
{
kvm_emulate_wbinvd(vcpu);
return 1;
}
static int handle_xsetbv(struct kvm_vcpu *vcpu)
{
u64 new_bv = kvm_read_edx_eax(vcpu);
u32 index = kvm_register_read(vcpu, VCPU_REGS_RCX);
if (kvm_set_xcr(vcpu, index, new_bv) == 0)
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_xsaves(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
static int handle_xrstors(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
WARN(1, "this should never happen\n");
return 1;
}
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
if (likely(fasteoi)) {
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int access_type, offset;
access_type = exit_qualification & APIC_ACCESS_TYPE;
offset = exit_qualification & APIC_ACCESS_OFFSET;
/*
* A sane guest uses MOV to write EOI, and the written value is
* ignored, so short-circuit here and avoid heavy instruction
* emulation.
*/
if ((access_type == TYPE_LINEAR_APIC_INST_WRITE) &&
(offset == APIC_EOI)) {
kvm_lapic_set_eoi(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
}
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
}
static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int vector = exit_qualification & 0xff;
/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
kvm_apic_set_eoi_accelerated(vcpu, vector);
return 1;
}
static int handle_apic_write(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 offset = exit_qualification & 0xfff;
/* APIC-write VM exit is trap-like and thus no need to adjust IP */
kvm_apic_write_nodecode(vcpu, offset);
return 1;
}
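/*
 * Handle a task-switch exit: undo any event injection that caused the
 * switch, extract the TSS selector from the exit qualification and let
 * the generic task-switch emulation take over.
 */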
static int handle_task_switch(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qualification;
bool has_error_code = false;
u32 error_code = 0;
u16 tss_selector;
int reason, type, idt_v, idt_index;
idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
reason = (u32)exit_qualification >> 30;
if (reason == TASK_SWITCH_GATE && idt_v) {
switch (type) {
case INTR_TYPE_NMI_INTR:
vcpu->arch.nmi_injected = false;
vmx_set_nmi_mask(vcpu, true);
break;
case INTR_TYPE_EXT_INTR:
case INTR_TYPE_SOFT_INTR:
kvm_clear_interrupt_queue(vcpu);
break;
case INTR_TYPE_HARD_EXCEPTION:
if (vmx->idt_vectoring_info &
VECTORING_INFO_DELIVER_CODE_MASK) {
has_error_code = true;
error_code =
vmcs_read32(IDT_VECTORING_ERROR_CODE);
}
/* fall through */
case INTR_TYPE_SOFT_EXCEPTION:
kvm_clear_exception_queue(vcpu);
break;
default:
break;
}
}
tss_selector = exit_qualification;
if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
type != INTR_TYPE_EXT_INTR &&
type != INTR_TYPE_NMI_INTR))
skip_emulated_instruction(vcpu);
if (kvm_task_switch(vcpu, tss_selector,
type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
has_error_code, error_code) == EMULATE_FAIL) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
return 0;
}
/*
* TODO: What about debug traps on tss switch?
* Are we supposed to inject them and update dr6?
*/
return 1;
}
static int handle_ept_violation(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
gpa_t gpa;
u32 error_code;
int gla_validity;
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
gla_validity = (exit_qualification >> 7) & 0x3;
if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
(long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
vmcs_readl(GUEST_LINEAR_ADDRESS));
printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
(long unsigned int)exit_qualification);
vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
return 0;
}
/*
* EPT violation happened while executing iret from NMI,
* "blocked by NMI" bit has to be set before next VM entry.
* There are errata that may cause this bit to not be set:
* AAK134, BY25.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
trace_kvm_page_fault(gpa, exit_qualification);
/* Is it a write fault? */
error_code = exit_qualification & PFERR_WRITE_MASK;
/* Is it a fetch fault? */
error_code |= (exit_qualification << 2) & PFERR_FETCH_MASK;
/* Is the EPT page-table entry present? */
error_code |= (exit_qualification >> 3) & PFERR_PRESENT_MASK;
vcpu->arch.exit_qualification = exit_qualification;
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
{
int ret;
gpa_t gpa;
gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
if (!kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
skip_emulated_instruction(vcpu);
trace_kvm_fast_mmio(gpa);
return 1;
}
ret = handle_mmio_page_fault(vcpu, gpa, true);
if (likely(ret == RET_MMIO_PF_EMULATE))
return x86_emulate_instruction(vcpu, gpa, 0, NULL, 0) ==
EMULATE_DONE;
if (unlikely(ret == RET_MMIO_PF_INVALID))
return kvm_mmu_page_fault(vcpu, gpa, 0, NULL, 0);
if (unlikely(ret == RET_MMIO_PF_RETRY))
return 1;
/* It is the real ept misconfig */
WARN_ON(1);
vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
return 0;
}
static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
u32 cpu_based_vm_exec_control;
/* clear pending NMI */
cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
++vcpu->stat.nmi_window_exits;
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 1;
}
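/*
 * Emulate the guest instruction by instruction while its state is
 * invalid for VMX execution, bounding the work per exit and breaking
 * out for interrupt windows, pending events, signals and reschedules.
 */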
static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
enum emulation_result err = EMULATE_DONE;
int ret = 1;
u32 cpu_exec_ctrl;
bool intr_window_requested;
unsigned count = 130;
cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
while (vmx->emulation_required && count-- != 0) {
if (intr_window_requested && vmx_interrupt_allowed(vcpu))
return handle_interrupt_window(&vmx->vcpu);
if (test_bit(KVM_REQ_EVENT, &vcpu->requests))
return 1;
err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;
ret = 0;
goto out;
}
if (err != EMULATE_DONE) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
return 0;
}
if (vcpu->arch.halt_request) {
vcpu->arch.halt_request = 0;
ret = kvm_vcpu_halt(vcpu);
goto out;
}
if (signal_pending(current))
goto out;
if (need_resched())
schedule();
}
out:
return ret;
}
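/*
 * Grow the PLE window: multiplicatively if ple_window_grow is below
 * ple_window, additively otherwise, capped at ple_window_actual_max.
 */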
static int __grow_ple_window(int val)
{
if (ple_window_grow < 1)
return ple_window;
val = min(val, ple_window_actual_max);
if (ple_window_grow < ple_window)
val *= ple_window_grow;
else
val += ple_window_grow;
return val;
}
static int __shrink_ple_window(int val, int modifier, int minimum)
{
if (modifier < 1)
return ple_window;
if (modifier < ple_window)
val /= modifier;
else
val -= modifier;
return max(val, minimum);
}
static void grow_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int old = vmx->ple_window;
vmx->ple_window = __grow_ple_window(old);
if (vmx->ple_window != old)
vmx->ple_window_dirty = true;
trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old);
}
static void shrink_ple_window(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int old = vmx->ple_window;
vmx->ple_window = __shrink_ple_window(old,
ple_window_shrink, ple_window);
if (vmx->ple_window != old)
vmx->ple_window_dirty = true;
trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old);
}
/*
* ple_window_actual_max is computed to be one grow_ple_window() below
* ple_window_max. (See __grow_ple_window for the reason.)
* This prevents overflows, because ple_window_max is int.
* ple_window_max effectively rounded down to a multiple of ple_window_grow in
* this process.
* ple_window_max is also prevented from setting vmx->ple_window < ple_window.
*/
static void update_ple_window_actual_max(void)
{
ple_window_actual_max =
__shrink_ple_window(max(ple_window_max, ple_window),
ple_window_grow, INT_MIN);
}
/*
* Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
*/
static void wakeup_handler(void)
{
struct kvm_vcpu *vcpu;
int cpu = smp_processor_id();
spin_lock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
list_for_each_entry(vcpu, &per_cpu(blocked_vcpu_on_cpu, cpu),
blocked_vcpu_list) {
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (pi_test_on(pi_desc) == 1)
kvm_vcpu_kick(vcpu);
}
spin_unlock(&per_cpu(blocked_vcpu_on_cpu_lock, cpu));
}
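/*
 * One-time hardware setup: allocate the I/O, MSR and VMREAD/VMWRITE
 * bitmaps, probe the VMCS capabilities, and disable any feature the
 * CPU does not support before handing control to the generic code.
 */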
static __init int hardware_setup(void)
{
int r = -ENOMEM, i, msr;
rdmsrl_safe(MSR_EFER, &host_efer);
for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
kvm_define_shared_msr(i, vmx_msr_index[i]);
vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_io_bitmap_a)
return r;
vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_io_bitmap_b)
goto out;
vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_legacy)
goto out1;
vmx_msr_bitmap_legacy_x2apic =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_legacy_x2apic)
goto out2;
vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode)
goto out3;
vmx_msr_bitmap_longmode_x2apic =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode_x2apic)
goto out4;
if (nested) {
vmx_msr_bitmap_nested =
(unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_nested)
goto out5;
}
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmread_bitmap)
goto out6;
vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_vmwrite_bitmap)
goto out7;
memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
/*
* Allow direct access to the PC debug port (it is often used for I/O
* delays, but the vmexits simply slow things down).
*/
memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
clear_bit(0x80, vmx_io_bitmap_a);
memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
if (nested)
memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out8;
}
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
if (!cpu_has_vmx_vpid())
enable_vpid = 0;
if (!cpu_has_vmx_shadow_vmcs())
enable_shadow_vmcs = 0;
if (enable_shadow_vmcs)
init_vmcs_shadow_fields();
if (!cpu_has_vmx_ept() ||
!cpu_has_vmx_ept_4levels()) {
enable_ept = 0;
enable_unrestricted_guest = 0;
enable_ept_ad_bits = 0;
}
if (!cpu_has_vmx_ept_ad_bits())
enable_ept_ad_bits = 0;
if (!cpu_has_vmx_unrestricted_guest())
enable_unrestricted_guest = 0;
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
/*
* set_apic_access_page_addr() is used to reload apic access
* page upon invalidation. No need to do anything if not
* using the APIC_ACCESS_ADDR VMCS field.
*/
if (!flexpriority_enabled)
kvm_x86_ops->set_apic_access_page_addr = NULL;
if (!cpu_has_vmx_tpr_shadow())
kvm_x86_ops->update_cr8_intercept = NULL;
if (enable_ept && !cpu_has_vmx_ept_2m_page())
kvm_disable_largepages();
if (!cpu_has_vmx_ple())
ple_gap = 0;
if (!cpu_has_vmx_apicv())
enable_apicv = 0;
if (cpu_has_vmx_tsc_scaling()) {
kvm_has_tsc_control = true;
kvm_max_tsc_scaling_ratio = KVM_VMX_TSC_MULTIPLIER_MAX;
kvm_tsc_scaling_ratio_frac_bits = 48;
}
if (enable_apicv)
kvm_x86_ops->update_cr8_intercept = NULL;
else {
kvm_x86_ops->hwapic_irr_update = NULL;
kvm_x86_ops->hwapic_isr_update = NULL;
kvm_x86_ops->deliver_posted_interrupt = NULL;
kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
}
vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
if (enable_apicv) {
for (msr = 0x800; msr <= 0x8ff; msr++)
vmx_disable_intercept_msr_read_x2apic(msr);
/* According to the SDM, in x2apic mode the whole id reg is
* used, but KVM only uses the highest eight bits, so reads
* must still be intercepted. */
vmx_enable_intercept_msr_read_x2apic(0x802);
/* TMCCT */
vmx_enable_intercept_msr_read_x2apic(0x839);
/* TPR */
vmx_disable_intercept_msr_write_x2apic(0x808);
/* EOI */
vmx_disable_intercept_msr_write_x2apic(0x80b);
/* SELF-IPI */
vmx_disable_intercept_msr_write_x2apic(0x83f);
}
if (enable_ept) {
kvm_mmu_set_mask_ptes(0ull,
(enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
(enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
0ull, VMX_EPT_EXECUTABLE_MASK);
ept_set_mmio_spte_mask();
kvm_enable_tdp();
} else
kvm_disable_tdp();
update_ple_window_actual_max();
/*
* Only enable PML when hardware supports PML feature, and both EPT
* and EPT A/D bit features are enabled -- PML depends on them to work.
*/
if (!enable_ept || !enable_ept_ad_bits || !cpu_has_vmx_pml())
enable_pml = 0;
if (!enable_pml) {
kvm_x86_ops->slot_enable_log_dirty = NULL;
kvm_x86_ops->slot_disable_log_dirty = NULL;
kvm_x86_ops->flush_log_dirty = NULL;
kvm_x86_ops->enable_log_dirty_pt_masked = NULL;
}
kvm_set_posted_intr_wakeup_handler(wakeup_handler);
return alloc_kvm_area();
out8:
free_page((unsigned long)vmx_vmwrite_bitmap);
out7:
free_page((unsigned long)vmx_vmread_bitmap);
out6:
if (nested)
free_page((unsigned long)vmx_msr_bitmap_nested);
out5:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
out4:
free_page((unsigned long)vmx_msr_bitmap_longmode);
out3:
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
out2:
free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
free_page((unsigned long)vmx_io_bitmap_b);
out:
free_page((unsigned long)vmx_io_bitmap_a);
return r;
}
static __exit void hardware_unsetup(void)
{
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
free_page((unsigned long)vmx_msr_bitmap_legacy);
free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
free_page((unsigned long)vmx_io_bitmap_a);
free_page((unsigned long)vmx_vmwrite_bitmap);
free_page((unsigned long)vmx_vmread_bitmap);
if (nested)
free_page((unsigned long)vmx_msr_bitmap_nested);
free_kvm_area();
}
/*
* Indicate that a vcpu is busy-waiting on a spinlock. We do not enable
* PAUSE exiting, so we only get here on CPUs with PAUSE-loop exiting.
*/
static int handle_pause(struct kvm_vcpu *vcpu)
{
if (ple_gap)
grow_ple_window(vcpu);
skip_emulated_instruction(vcpu);
kvm_vcpu_on_spin(vcpu);
return 1;
}
static int handle_nop(struct kvm_vcpu *vcpu)
{
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_mwait(struct kvm_vcpu *vcpu)
{
printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
return handle_nop(vcpu);
}
static int handle_monitor_trap(struct kvm_vcpu *vcpu)
{
return 1;
}
static int handle_monitor(struct kvm_vcpu *vcpu)
{
printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
return handle_nop(vcpu);
}
/*
* To run an L2 guest, we need a vmcs02 based on the L1-specified vmcs12.
* We could reuse a single VMCS for all the L2 guests, but we also want the
* option to allocate a separate vmcs02 for each separate loaded vmcs12 - this
* allows keeping them loaded on the processor, and in the future will allow
* optimizations where prepare_vmcs02 doesn't need to set all the fields on
* every entry if they never change.
* So we keep, in vmx->nested.vmcs02_pool, a cache of size VMCS02_POOL_SIZE
* (>=0) with a vmcs02 for each recently loaded vmcs12, most recent first.
*
* The following functions allocate and free a vmcs02 in this pool.
*/
/* Get a VMCS from the pool to use as vmcs02 for the current vmcs12. */
static struct loaded_vmcs *nested_get_current_vmcs02(struct vcpu_vmx *vmx)
{
struct vmcs02_list *item;
list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
if (item->vmptr == vmx->nested.current_vmptr) {
list_move(&item->list, &vmx->nested.vmcs02_pool);
return &item->vmcs02;
}
if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
/* Recycle the least recently used VMCS. */
item = list_entry(vmx->nested.vmcs02_pool.prev,
struct vmcs02_list, list);
item->vmptr = vmx->nested.current_vmptr;
list_move(&item->list, &vmx->nested.vmcs02_pool);
return &item->vmcs02;
}
/* Create a new VMCS */
item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
if (!item->vmcs02.vmcs) {
kfree(item);
return NULL;
}
loaded_vmcs_init(&item->vmcs02);
item->vmptr = vmx->nested.current_vmptr;
list_add(&(item->list), &(vmx->nested.vmcs02_pool));
vmx->nested.vmcs02_num++;
return &item->vmcs02;
}
/* Free and remove from pool a vmcs02 saved for a vmcs12 (if there is one) */
static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
{
struct vmcs02_list *item;
list_for_each_entry(item, &vmx->nested.vmcs02_pool, list)
if (item->vmptr == vmptr) {
free_loaded_vmcs(&item->vmcs02);
list_del(&item->list);
kfree(item);
vmx->nested.vmcs02_num--;
return;
}
}
/*
* Free all VMCSs saved for this vcpu, except the one pointed by
* vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
* must be &vmx->vmcs01.
*/
static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
{
struct vmcs02_list *item, *n;
WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
/*
* Something will leak if the above WARN triggers. Better than
* a use-after-free.
*/
if (vmx->loaded_vmcs == &item->vmcs02)
continue;
free_loaded_vmcs(&item->vmcs02);
list_del(&item->list);
kfree(item);
vmx->nested.vmcs02_num--;
}
}
/*
* The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
* set the success or error code of an emulated VMX instruction, as specified
* by Vol 2B, VMX Instruction Reference, "Conventions".
*/
static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
}
static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
{
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
| X86_EFLAGS_CF);
}
static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
u32 vm_instruction_error)
{
if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
/*
* failValid writes the error number to the current VMCS, which
* can't be done if there isn't a current VMCS.
*/
nested_vmx_failInvalid(vcpu);
return;
}
vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
& ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
X86_EFLAGS_SF | X86_EFLAGS_OF))
| X86_EFLAGS_ZF);
get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
/*
* We don't need to force a shadow sync because
* VM_INSTRUCTION_ERROR is not shadowed
*/
}
static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
{
/* TODO: do something better than simply resetting the guest here. */
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
pr_warn("kvm: nested vmx abort, indicator %d\n", indicator);
}
static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
{
struct vcpu_vmx *vmx =
container_of(timer, struct vcpu_vmx, nested.preemption_timer);
vmx->nested.preemption_timer_expired = true;
kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
kvm_vcpu_kick(&vmx->vcpu);
return HRTIMER_NORESTART;
}
/*
* Decode the memory-address operand of a vmx instruction, as recorded on an
* exit caused by such an instruction (run by a guest hypervisor).
* On success, returns 0. When the operand is invalid, returns 1 and
* injects #UD, #GP(0) or #SS(0).
*/
static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, gva_t *ret)
{
gva_t off;
bool exn;
struct kvm_segment s;
/*
* According to Vol. 3B, "Information for VM Exits Due to Instruction
* Execution", on an exit, vmx_instruction_info holds most of the
* addressing components of the operand. Only the displacement part
* is put in exit_qualification (see 3B, "Basic VM-Exit Information").
* For how an actual address is calculated from all these components,
* refer to Vol. 1, "Operand Addressing".
*/
int scaling = vmx_instruction_info & 3;
int addr_size = (vmx_instruction_info >> 7) & 7;
bool is_reg = vmx_instruction_info & (1u << 10);
int seg_reg = (vmx_instruction_info >> 15) & 7;
int index_reg = (vmx_instruction_info >> 18) & 0xf;
bool index_is_valid = !(vmx_instruction_info & (1u << 22));
int base_reg = (vmx_instruction_info >> 23) & 0xf;
bool base_is_valid = !(vmx_instruction_info & (1u << 27));
if (is_reg) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
/* Addr = segment_base + offset */
/* offset = base + [index * scale] + displacement */
off = exit_qualification; /* holds the displacement */
if (base_is_valid)
off += kvm_register_read(vcpu, base_reg);
if (index_is_valid)
off += kvm_register_read(vcpu, index_reg) << scaling;
vmx_get_segment(vcpu, &s, seg_reg);
*ret = s.base + off;
if (addr_size == 1) /* 32 bit */
*ret &= 0xffffffff;
/* Checks for #GP/#SS exceptions. */
exn = false;
if (is_protmode(vcpu)) {
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
* - usability check (#GP(0)/#SS(0))
* - limit check (#GP(0)/#SS(0))
*/
if (wr)
/* #GP(0) if the destination operand is located in a
* read-only data segment or any code segment.
*/
exn = ((s.type & 0xa) == 0 || (s.type & 8));
else
/* #GP(0) if the source operand is located in an
* execute-only code segment
*/
exn = ((s.type & 0xa) == 8);
}
if (exn) {
kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
return 1;
}
if (is_long_mode(vcpu)) {
/* Long mode: #GP(0)/#SS(0) if the memory address is in a
* non-canonical form. This is the only check applied in long mode.
*/
exn = is_noncanonical_address(*ret);
} else if (is_protmode(vcpu)) {
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
/* Protected mode: #GP(0)/#SS(0) if the memory
* operand is outside the segment limit.
*/
exn = exn || (off + sizeof(u64) > s.limit);
}
if (exn) {
kvm_queue_exception_e(vcpu,
seg_reg == VCPU_SREG_SS ?
SS_VECTOR : GP_VECTOR,
0);
return 1;
}
return 0;
}
/*
* This function performs validity checks on the VM pointer:
* - it must be 4KB aligned
* - no bits beyond the physical address width may be set
* Returns 0 on success, 1 otherwise.
* (Intel SDM Section 30.3)
*/
static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
gpa_t *vmpointer)
{
gva_t gva;
gpa_t vmptr;
struct x86_exception e;
struct page *page;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
sizeof(vmptr), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
switch (exit_reason) {
case EXIT_REASON_VMON:
/*
* SDM 3: 24.11.5
* The first 4 bytes of VMXON region contain the supported
* VMCS revision identifier
*
* Note: IA32_VMX_BASIC[48] will never be 1 for the nested
* case, which would restrict the physical address width
* to 32 bits.
*/
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
/* Check the revision id separately so we never kunmap() or
* release a page that was not successfully mapped. */
if (*(u32 *)kmap(page) != VMCS12_REVISION) {
nested_vmx_failInvalid(vcpu);
kunmap(page);
nested_release_page_clean(page);
skip_emulated_instruction(vcpu);
return 1;
}
kunmap(page);
nested_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
break;
case EXIT_REASON_VMCLEAR:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_INVALID_ADDRESS);
skip_emulated_instruction(vcpu);
return 1;
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
skip_emulated_instruction(vcpu);
return 1;
}
break;
case EXIT_REASON_VMPTRLD:
if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INVALID_ADDRESS);
skip_emulated_instruction(vcpu);
return 1;
}
if (vmptr == vmx->nested.vmxon_ptr) {
nested_vmx_failValid(vcpu,
VMXERR_VMCLEAR_VMXON_POINTER);
skip_emulated_instruction(vcpu);
return 1;
}
break;
default:
return 1; /* shouldn't happen */
}
if (vmpointer)
*vmpointer = vmptr;
return 0;
}
/*
* Emulate the VMXON instruction.
* The VMXON pointer is checked for alignment, physical-address width
* and VMCS revision id, and remembered so that VMCLEAR and VMPTRLD
* can verify that their argument differs from it (as the spec
* requires), but we do not otherwise store anything in that
* guest-allocated memory region.
*/
static int handle_vmon(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs;
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs *shadow_vmcs;
const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
| FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
/* The Intel VMX Instruction Reference lists a bunch of bits that
* are prerequisite to running VMXON, most notably cr4.VMXE must be
* set to 1 (see vmx_set_cr4() for when we allow the guest to set this).
* Otherwise, we should fail with #UD. We test these now:
*/
if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE) ||
!kvm_read_cr0_bits(vcpu, X86_CR0_PE) ||
(vmx_get_rflags(vcpu) & X86_EFLAGS_VM)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
if (is_long_mode(vcpu) && !cs.l) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
if (vmx_get_cpl(vcpu)) {
kvm_inject_gp(vcpu, 0);
return 1;
}
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
return 1;
if (vmx->nested.vmxon) {
nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
skip_emulated_instruction(vcpu);
return 1;
}
if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
!= VMXON_NEEDED_FEATURES) {
kvm_inject_gp(vcpu, 0);
return 1;
}
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
if (!shadow_vmcs)
return -ENOMEM;
/* mark vmcs as shadow */
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
vmcs_clear(shadow_vmcs);
vmx->nested.current_shadow_vmcs = shadow_vmcs;
}
INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
vmx->nested.vmcs02_num = 0;
hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
vmx->nested.vmxon = true;
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
}
/*
* Intel's VMX Instruction Reference specifies a common set of prerequisites
* for running VMX instructions (except VMXON, whose prerequisites are
* slightly different). It also specifies what exception to inject otherwise.
*/
static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
{
struct kvm_segment cs;
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx->nested.vmxon) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
if ((vmx_get_rflags(vcpu) & X86_EFLAGS_VM) ||
(is_long_mode(vcpu) && !cs.l)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 0;
}
if (vmx_get_cpl(vcpu)) {
kvm_inject_gp(vcpu, 0);
return 0;
}
return 1;
}
static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
{
if (vmx->nested.current_vmptr == -1ull)
return;
/* current_vmptr and current_vmcs12 are always set/reset together */
if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
return;
if (enable_shadow_vmcs) {
/* copy to memory all shadowed fields in case
they were modified */
copy_shadow_to_vmcs12(vmx);
vmx->nested.sync_shadow_vmcs = false;
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
}
vmx->nested.posted_intr_nv = -1;
kunmap(vmx->nested.current_vmcs12_page);
nested_release_page(vmx->nested.current_vmcs12_page);
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
}
/*
* Free whatever needs to be freed from vmx->nested when L1 goes down, or
* just stops using VMX.
*/
static void free_nested(struct vcpu_vmx *vmx)
{
if (!vmx->nested.vmxon)
return;
vmx->nested.vmxon = false;
free_vpid(vmx->nested.vpid02);
nested_release_vmcs12(vmx);
if (enable_shadow_vmcs)
free_vmcs(vmx->nested.current_shadow_vmcs);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
if (vmx->nested.virtual_apic_page) {
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
if (vmx->nested.pi_desc_page) {
kunmap(vmx->nested.pi_desc_page);
nested_release_page(vmx->nested.pi_desc_page);
vmx->nested.pi_desc_page = NULL;
vmx->nested.pi_desc = NULL;
}
nested_free_all_saved_vmcss(vmx);
}
/* Emulate the VMXOFF instruction */
static int handle_vmoff(struct kvm_vcpu *vcpu)
{
if (!nested_vmx_check_permission(vcpu))
return 1;
free_nested(to_vmx(vcpu));
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
}
/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
struct vmcs12 *vmcs12;
struct page *page;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
return 1;
if (vmptr == vmx->nested.current_vmptr)
nested_release_vmcs12(vmx);
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
/*
* For accurate processor emulation, VMCLEAR beyond available
* physical memory should do nothing at all. However, it is
* possible that a nested vmx bug, not a guest hypervisor bug,
* resulted in this case, so let's shut down before doing any
* more damage:
*/
kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
return 1;
}
vmcs12 = kmap(page);
vmcs12->launch_state = 0;
kunmap(page);
nested_release_page(page);
nested_free_vmcs02(vmx, vmptr);
skip_emulated_instruction(vcpu);
nested_vmx_succeed(vcpu);
return 1;
}
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch);
/* Emulate the VMLAUNCH instruction */
static int handle_vmlaunch(struct kvm_vcpu *vcpu)
{
return nested_vmx_run(vcpu, true);
}
/* Emulate the VMRESUME instruction */
static int handle_vmresume(struct kvm_vcpu *vcpu)
{
return nested_vmx_run(vcpu, false);
}
enum vmcs_field_type {
VMCS_FIELD_TYPE_U16 = 0,
VMCS_FIELD_TYPE_U64 = 1,
VMCS_FIELD_TYPE_U32 = 2,
VMCS_FIELD_TYPE_NATURAL_WIDTH = 3
};
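/*
 * VMCS field encodings: bit 0 selects the high half of a 64-bit field,
 * bits 11:10 hold the access type (1 = read-only) and bits 14:13 the
 * field width (see vmcs_field_type() and vmcs_field_readonly() below).
 */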
static inline int vmcs_field_type(unsigned long field)
{
if (0x1 & field) /* the *_HIGH fields are all 32 bit */
return VMCS_FIELD_TYPE_U32;
return (field >> 13) & 0x3;
}
static inline int vmcs_field_readonly(unsigned long field)
{
return (((field >> 10) & 0x3) == 1);
}
/*
* Read a vmcs12 field. Since these can have varying lengths and we return
* one type, we chose the biggest type (u64) and zero-extend the return value
* to that size. Note that the caller, handle_vmread, might need to use only
* some of the bits we return here (e.g., on 32-bit guests, only 32 bits of
* 64-bit fields are to be returned).
*/
static inline int vmcs12_read_any(struct kvm_vcpu *vcpu,
unsigned long field, u64 *ret)
{
short offset = vmcs_field_to_offset(field);
char *p;
if (offset < 0)
return offset;
p = ((char *)(get_vmcs12(vcpu))) + offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*ret = *((natural_width *)p);
return 0;
case VMCS_FIELD_TYPE_U16:
*ret = *((u16 *)p);
return 0;
case VMCS_FIELD_TYPE_U32:
*ret = *((u32 *)p);
return 0;
case VMCS_FIELD_TYPE_U64:
*ret = *((u64 *)p);
return 0;
default:
WARN_ON(1);
return -ENOENT;
}
}
static inline int vmcs12_write_any(struct kvm_vcpu *vcpu,
unsigned long field, u64 field_value)
{
short offset = vmcs_field_to_offset(field);
char *p;
if (offset < 0)
return offset;
p = ((char *)get_vmcs12(vcpu)) + offset;
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
*(u16 *)p = field_value;
return 0;
case VMCS_FIELD_TYPE_U32:
*(u32 *)p = field_value;
return 0;
case VMCS_FIELD_TYPE_U64:
*(u64 *)p = field_value;
return 0;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
*(natural_width *)p = field_value;
return 0;
default:
WARN_ON(1);
return -ENOENT;
}
}
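/*
 * Copy the writable shadow-VMCS fields that the guest may have changed
 * with VMWRITE back into the in-memory vmcs12.
 */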
static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
{
int i;
unsigned long field;
u64 field_value;
struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
const unsigned long *fields = shadow_read_write_fields;
const int num_fields = max_shadow_read_write_fields;
preempt_disable();
vmcs_load(shadow_vmcs);
for (i = 0; i < num_fields; i++) {
field = fields[i];
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
field_value = vmcs_read16(field);
break;
case VMCS_FIELD_TYPE_U32:
field_value = vmcs_read32(field);
break;
case VMCS_FIELD_TYPE_U64:
field_value = vmcs_read64(field);
break;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
field_value = vmcs_readl(field);
break;
default:
WARN_ON(1);
continue;
}
vmcs12_write_any(&vmx->vcpu, field, field_value);
}
vmcs_clear(shadow_vmcs);
vmcs_load(vmx->loaded_vmcs->vmcs);
preempt_enable();
}
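/*
 * Propagate the in-memory vmcs12, both read/write and read-only fields,
 * into the shadow VMCS so the guest reads up-to-date values via VMREAD.
 */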
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
const unsigned long *fields[] = {
shadow_read_write_fields,
shadow_read_only_fields
};
const int max_fields[] = {
max_shadow_read_write_fields,
max_shadow_read_only_fields
};
int i, q;
unsigned long field;
u64 field_value = 0;
struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
vmcs_load(shadow_vmcs);
for (q = 0; q < ARRAY_SIZE(fields); q++) {
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
vmcs12_read_any(&vmx->vcpu, field, &field_value);
switch (vmcs_field_type(field)) {
case VMCS_FIELD_TYPE_U16:
vmcs_write16(field, (u16)field_value);
break;
case VMCS_FIELD_TYPE_U32:
vmcs_write32(field, (u32)field_value);
break;
case VMCS_FIELD_TYPE_U64:
vmcs_write64(field, (u64)field_value);
break;
case VMCS_FIELD_TYPE_NATURAL_WIDTH:
vmcs_writel(field, (long)field_value);
break;
default:
WARN_ON(1);
break;
}
}
}
vmcs_clear(shadow_vmcs);
vmcs_load(vmx->loaded_vmcs->vmcs);
}
/*
* VMX instructions which assume a current vmcs12 (i.e., that VMPTRLD was
* used before) all generate the same failure when it is missing.
*/
static int nested_vmx_check_vmcs12(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vmx->nested.current_vmptr == -1ull) {
nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 0;
}
return 1;
}
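/*
 * Emulate VMREAD: decode the field from the instruction info, read it
 * from the current vmcs12 and store it in a register or in memory.
 */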
static int handle_vmread(struct kvm_vcpu *vcpu)
{
unsigned long field;
u64 field_value;
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t gva = 0;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
return 1;
/* Decode instruction info and find the field to read */
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
/* Read the field, zero-extended to a u64 field_value */
if (vmcs12_read_any(vcpu, field, &field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
}
/*
* Now copy part of this value to register or memory, as requested.
* Note that the number of bits actually copied is 32 or 64 depending
* on the guest's mode (32 or 64 bit), not on the given field's length.
*/
if (vmx_instruction_info & (1u << 10)) {
kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
field_value);
} else {
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &gva))
return 1;
/* _system ok, as nested_vmx_check_permission verified cpl=0 */
kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
}
nested_vmx_succeed(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_vmwrite(struct kvm_vcpu *vcpu)
{
unsigned long field;
gva_t gva;
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
/* The value to write might be 32 or 64 bits, depending on L1's long
* mode, and eventually we need to write that into a field of several
* possible lengths. The code below first zero-extends the value to 64
* bit (field_value), and then copies only the appropriate number of
* bits into the vmcs12 field.
*/
u64 field_value = 0;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
return 1;
if (vmx_instruction_info & (1u << 10))
field_value = kvm_register_readl(vcpu,
(((vmx_instruction_info) >> 3) & 0xf));
else {
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
&field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
}
field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
if (vmcs_field_readonly(field)) {
nested_vmx_failValid(vcpu,
VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
}
if (vmcs12_write_any(vcpu, field, field_value) < 0) {
nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
skip_emulated_instruction(vcpu);
return 1;
}
nested_vmx_succeed(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
/* Emulate the VMPTRLD instruction */
static int handle_vmptrld(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
gpa_t vmptr;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
return 1;
if (vmx->nested.current_vmptr != vmptr) {
struct vmcs12 *new_vmcs12;
struct page *page;
page = nested_get_page(vcpu, vmptr);
if (page == NULL) {
nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
new_vmcs12 = kmap(page);
if (new_vmcs12->revision_id != VMCS12_REVISION) {
kunmap(page);
nested_release_page_clean(page);
nested_vmx_failValid(vcpu,
VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
skip_emulated_instruction(vcpu);
return 1;
}
nested_release_vmcs12(vmx);
vmx->nested.current_vmptr = vmptr;
vmx->nested.current_vmcs12 = new_vmcs12;
vmx->nested.current_vmcs12_page = page;
if (enable_shadow_vmcs) {
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
__pa(vmx->nested.current_shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
}
}
nested_vmx_succeed(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
/* Emulate the VMPTRST instruction */
static int handle_vmptrst(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
gva_t vmcs_gva;
struct x86_exception e;
if (!nested_vmx_check_permission(vcpu))
return 1;
if (get_vmx_mem_address(vcpu, exit_qualification,
vmx_instruction_info, true, &vmcs_gva))
return 1;
/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
(void *)&to_vmx(vcpu)->nested.current_vmptr,
sizeof(u64), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
nested_vmx_succeed(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
/* Emulate the INVEPT instruction */
static int handle_invept(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vmx_instruction_info, types;
unsigned long type;
gva_t gva;
struct x86_exception e;
struct {
u64 eptp, gpa;
} operand;
if (!(vmx->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_ENABLE_EPT) ||
!(vmx->nested.nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
if (!nested_vmx_check_permission(vcpu))
return 1;
if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
/* According to the Intel VMX instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
sizeof(operand), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
switch (type) {
case VMX_EPT_EXTENT_GLOBAL:
kvm_mmu_sync_roots(vcpu);
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
nested_vmx_succeed(vcpu);
break;
default:
/* Trap single context invalidation invept calls */
BUG_ON(1);
break;
}
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_invvpid(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 vmx_instruction_info;
unsigned long type, types;
gva_t gva;
struct x86_exception e;
int vpid;
if (!(vmx->nested.nested_vmx_secondary_ctls_high &
SECONDARY_EXEC_ENABLE_VPID) ||
!(vmx->nested.nested_vmx_vpid_caps & VMX_VPID_INVVPID_BIT)) {
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
if (!nested_vmx_check_permission(vcpu))
return 1;
vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
if (!(types & (1UL << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
/* According to the Intel VMX instruction reference, the memory
* operand is read even if it isn't needed (e.g., for type==global)
*/
if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
vmx_instruction_info, false, &gva))
return 1;
if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vpid,
sizeof(u32), &e)) {
kvm_inject_page_fault(vcpu, &e);
return 1;
}
switch (type) {
case VMX_VPID_EXTENT_ALL_CONTEXT:
if (get_vmcs12(vcpu)->virtual_processor_id == 0) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
return 1;
}
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
nested_vmx_succeed(vcpu);
break;
default:
/* Trap single context invalidation invvpid calls */
BUG_ON(1);
break;
}
skip_emulated_instruction(vcpu);
return 1;
}
static int handle_pml_full(struct kvm_vcpu *vcpu)
{
unsigned long exit_qualification;
trace_kvm_pml_full(vcpu->vcpu_id);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
/*
* PML buffer FULL happened while executing iret from NMI,
* "blocked by NMI" bit has to be set before next VM entry.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
cpu_has_virtual_nmis() &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
/*
* PML buffer already flushed at beginning of VMEXIT. Nothing else to
* do here, and there's no userspace involvement needed for PML.
*/
return 1;
}
static int handle_pcommit(struct kvm_vcpu *vcpu)
{
/* We never intercept the PCOMMIT instruction for the L1 guest. */
WARN_ON(1);
return 1;
}
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
* to be done to userspace and return 0.
*/
static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_EXCEPTION_NMI] = handle_exception,
[EXIT_REASON_EXTERNAL_INTERRUPT] = handle_external_interrupt,
[EXIT_REASON_TRIPLE_FAULT] = handle_triple_fault,
[EXIT_REASON_NMI_WINDOW] = handle_nmi_window,
[EXIT_REASON_IO_INSTRUCTION] = handle_io,
[EXIT_REASON_CR_ACCESS] = handle_cr,
[EXIT_REASON_DR_ACCESS] = handle_dr,
[EXIT_REASON_CPUID] = handle_cpuid,
[EXIT_REASON_MSR_READ] = handle_rdmsr,
[EXIT_REASON_MSR_WRITE] = handle_wrmsr,
[EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
[EXIT_REASON_HLT] = handle_halt,
[EXIT_REASON_INVD] = handle_invd,
[EXIT_REASON_INVLPG] = handle_invlpg,
[EXIT_REASON_RDPMC] = handle_rdpmc,
[EXIT_REASON_VMCALL] = handle_vmcall,
[EXIT_REASON_VMCLEAR] = handle_vmclear,
[EXIT_REASON_VMLAUNCH] = handle_vmlaunch,
[EXIT_REASON_VMPTRLD] = handle_vmptrld,
[EXIT_REASON_VMPTRST] = handle_vmptrst,
[EXIT_REASON_VMREAD] = handle_vmread,
[EXIT_REASON_VMRESUME] = handle_vmresume,
[EXIT_REASON_VMWRITE] = handle_vmwrite,
[EXIT_REASON_VMOFF] = handle_vmoff,
[EXIT_REASON_VMON] = handle_vmon,
[EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
[EXIT_REASON_APIC_ACCESS] = handle_apic_access,
[EXIT_REASON_APIC_WRITE] = handle_apic_write,
[EXIT_REASON_EOI_INDUCED] = handle_apic_eoi_induced,
[EXIT_REASON_WBINVD] = handle_wbinvd,
[EXIT_REASON_XSETBV] = handle_xsetbv,
[EXIT_REASON_TASK_SWITCH] = handle_task_switch,
[EXIT_REASON_MCE_DURING_VMENTRY] = handle_machine_check,
[EXIT_REASON_EPT_VIOLATION] = handle_ept_violation,
[EXIT_REASON_EPT_MISCONFIG] = handle_ept_misconfig,
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
[EXIT_REASON_MONITOR_TRAP_FLAG] = handle_monitor_trap,
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
[EXIT_REASON_INVEPT] = handle_invept,
[EXIT_REASON_INVVPID] = handle_invvpid,
[EXIT_REASON_XSAVES] = handle_xsaves,
[EXIT_REASON_XRSTORS] = handle_xrstors,
[EXIT_REASON_PML_FULL] = handle_pml_full,
[EXIT_REASON_PCOMMIT] = handle_pcommit,
};
static const int kvm_vmx_max_exit_handlers =
ARRAY_SIZE(kvm_vmx_exit_handlers);
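/*
 * Return true if an I/O instruction in L2 should cause an exit to L1,
 * i.e. if L1 set the corresponding bit(s) in its I/O bitmaps (or exits
 * unconditionally when no bitmaps are used).
 */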
static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification;
gpa_t bitmap, last_bitmap;
unsigned int port;
int size;
u8 b;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
port = exit_qualification >> 16;
size = (exit_qualification & 7) + 1;
last_bitmap = (gpa_t)-1;
b = -1;
while (size > 0) {
if (port < 0x8000)
bitmap = vmcs12->io_bitmap_a;
else if (port < 0x10000)
bitmap = vmcs12->io_bitmap_b;
else
return true;
bitmap += (port & 0x7fff) / 8;
if (last_bitmap != bitmap)
if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
return true;
if (b & (1 << (port & 7)))
return true;
port++;
size--;
last_bitmap = bitmap;
}
return false;
}
/*
* Return 1 if we should exit from L2 to L1 to handle an MSR access,
* rather than handle it ourselves in L0. I.e., check whether L1 expressed
* disinterest in the current event (read or write a specific MSR) by using an
* MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
*/
static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12, u32 exit_reason)
{
u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
gpa_t bitmap;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return true;
/*
* The MSR_BITMAP page is divided into four 1024-byte bitmaps,
* for the four combinations of read/write and low/high MSR numbers.
* First we need to figure out which of the four to use:
*/
bitmap = vmcs12->msr_bitmap;
if (exit_reason == EXIT_REASON_MSR_WRITE)
bitmap += 2048;
if (msr_index >= 0xc0000000) {
msr_index -= 0xc0000000;
bitmap += 1024;
}
/* Then read the msr_index'th bit from this bitmap: */
if (msr_index < 1024*8) {
unsigned char b;
if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
return true;
return 1 & (b >> (msr_index & 7));
} else
return true; /* let L1 handle the wrong parameter */
}
/*
* Return 1 if we should exit from L2 to L1 to handle a CR access exit,
* rather than handle it ourselves in L0. I.e., check if L1 wanted to
* intercept (via guest_host_mask etc.) the current event.
*/
static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
int cr = exit_qualification & 15;
int reg = (exit_qualification >> 8) & 15;
unsigned long val = kvm_register_readl(vcpu, reg);
switch ((exit_qualification >> 4) & 3) {
case 0: /* mov to cr */
switch (cr) {
case 0:
if (vmcs12->cr0_guest_host_mask &
(val ^ vmcs12->cr0_read_shadow))
return true;
break;
case 3:
if ((vmcs12->cr3_target_count >= 1 &&
vmcs12->cr3_target_value0 == val) ||
(vmcs12->cr3_target_count >= 2 &&
vmcs12->cr3_target_value1 == val) ||
(vmcs12->cr3_target_count >= 3 &&
vmcs12->cr3_target_value2 == val) ||
(vmcs12->cr3_target_count >= 4 &&
vmcs12->cr3_target_value3 == val))
return false;
if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
return true;
break;
case 4:
if (vmcs12->cr4_guest_host_mask &
(vmcs12->cr4_read_shadow ^ val))
return true;
break;
case 8:
if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
return true;
break;
}
break;
case 2: /* clts */
if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
(vmcs12->cr0_read_shadow & X86_CR0_TS))
return true;
break;
case 1: /* mov from cr */
switch (cr) {
case 3:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR3_STORE_EXITING)
return true;
break;
case 8:
if (vmcs12->cpu_based_vm_exec_control &
CPU_BASED_CR8_STORE_EXITING)
return true;
break;
}
break;
case 3: /* lmsw */
/*
* lmsw can change bits 1..3 of cr0, and only set bit 0 of
* cr0. Other attempted changes are ignored, with no exit.
*/
if (vmcs12->cr0_guest_host_mask & 0xe &
(val ^ vmcs12->cr0_read_shadow))
return true;
if ((vmcs12->cr0_guest_host_mask & 0x1) &&
!(vmcs12->cr0_read_shadow & 0x1) &&
(val & 0x1))
return true;
break;
}
return false;
}
/*
* Return 1 if we should exit from L2 to L1 to handle an exit, or 0 if we
* should handle it ourselves in L0 (and then continue L2). Only call this
* when in is_guest_mode (L2).
*/
static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
{
u32 intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason = vmx->exit_reason;
trace_kvm_nested_vmexit(kvm_rip_read(vcpu), exit_reason,
vmcs_readl(EXIT_QUALIFICATION),
vmx->idt_vectoring_info,
intr_info,
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
KVM_ISA_VMX);
if (vmx->nested.nested_run_pending)
return false;
if (unlikely(vmx->fail)) {
pr_info_ratelimited("%s failed vm entry %x\n", __func__,
vmcs_read32(VM_INSTRUCTION_ERROR));
return true;
}
switch (exit_reason) {
case EXIT_REASON_EXCEPTION_NMI:
if (!is_exception(intr_info))
return false;
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
return false;
case EXIT_REASON_TRIPLE_FAULT:
return true;
case EXIT_REASON_PENDING_INTERRUPT:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_INTR_PENDING);
case EXIT_REASON_NMI_WINDOW:
return nested_cpu_has(vmcs12, CPU_BASED_VIRTUAL_NMI_PENDING);
case EXIT_REASON_TASK_SWITCH:
return true;
case EXIT_REASON_CPUID:
if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
return false;
return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
case EXIT_REASON_INVD:
return true;
case EXIT_REASON_INVLPG:
return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
case EXIT_REASON_RDPMC:
return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
*/
return true;
case EXIT_REASON_CR_ACCESS:
return nested_vmx_exit_handled_cr(vcpu, vmcs12);
case EXIT_REASON_DR_ACCESS:
return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
case EXIT_REASON_IO_INSTRUCTION:
return nested_vmx_exit_handled_io(vcpu, vmcs12);
case EXIT_REASON_MSR_READ:
case EXIT_REASON_MSR_WRITE:
return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
case EXIT_REASON_INVALID_STATE:
return true;
case EXIT_REASON_MWAIT_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
case EXIT_REASON_MONITOR_TRAP_FLAG:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
case EXIT_REASON_MONITOR_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
case EXIT_REASON_PAUSE_INSTRUCTION:
return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
nested_cpu_has2(vmcs12,
SECONDARY_EXEC_PAUSE_LOOP_EXITING);
case EXIT_REASON_MCE_DURING_VMENTRY:
return false;
case EXIT_REASON_TPR_BELOW_THRESHOLD:
return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
case EXIT_REASON_APIC_ACCESS:
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
case EXIT_REASON_APIC_WRITE:
case EXIT_REASON_EOI_INDUCED:
/* apic_write and eoi_induced should exit unconditionally. */
return true;
case EXIT_REASON_EPT_VIOLATION:
/*
* L0 always deals with the EPT violation. If nested EPT is
* used, and the nested mmu code discovers that the address is
* missing in the guest EPT table (EPT12), the EPT violation
* will be injected with nested_ept_inject_page_fault()
*/
return false;
case EXIT_REASON_EPT_MISCONFIG:
/*
 * L2 never uses L1's EPT directly, but rather L0's own EPT
 * table (shadow on EPT) or a merged EPT table that L0 built
 * (EPT on EPT). So any problems with the structure of the
 * table are L0's fault.
 */
return false;
case EXIT_REASON_WBINVD:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
case EXIT_REASON_XSETBV:
return true;
case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
/*
 * This should never happen, since it is not possible to
 * set XSS to a non-zero value---neither in L1 nor in L2.
 * If it were, XSS would have to be checked against
 * the XSS exit bitmap in vmcs12.
 */
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PCOMMIT:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
default:
return true;
}
}
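/*
 * A reminder of what nested_cpu_has()/nested_cpu_has2(), used all over
 * the switch above, actually test; a minimal sketch of their logic:
 *
 *	nested_cpu_has(vmcs12, bit):
 *		return vmcs12->cpu_based_vm_exec_control & bit;
 *	nested_cpu_has2(vmcs12, bit):
 *		return nested_cpu_has(vmcs12,
 *			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
 *		       (vmcs12->secondary_vm_exec_control & bit);
 */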
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
*info1 = vmcs_readl(EXIT_QUALIFICATION);
*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
}
static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
{
struct page *pml_pg;
pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!pml_pg)
return -ENOMEM;
vmx->pml_pg = pml_pg;
vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
return 0;
}
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
{
if (vmx->pml_pg) {
__free_page(vmx->pml_pg);
vmx->pml_pg = NULL;
}
}
static void vmx_flush_pml_buffer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u64 *pml_buf;
u16 pml_idx;
pml_idx = vmcs_read16(GUEST_PML_INDEX);
/* Do nothing if PML buffer is empty */
if (pml_idx == (PML_ENTITY_NUM - 1))
return;
/* PML index always points to next available PML buffer entity */
if (pml_idx >= PML_ENTITY_NUM)
pml_idx = 0;
else
pml_idx++;
pml_buf = page_address(vmx->pml_pg);
for (; pml_idx < PML_ENTITY_NUM; pml_idx++) {
u64 gpa;
gpa = pml_buf[pml_idx];
WARN_ON(gpa & (PAGE_SIZE - 1));
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
}
/* reset PML index */
vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
}
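/*
 * Worked example for the index handling above: hardware starts
 * GUEST_PML_INDEX at PML_ENTITY_NUM - 1 (511) and decrements it for
 * each GPA logged. After three GPAs, pml_idx reads back as 508, the
 * valid entries are pml_buf[509..511], and the loop starts at
 * pml_idx + 1 = 509. A full buffer leaves the index at 0xffff
 * (>= PML_ENTITY_NUM), in which case all 512 entries are walked
 * starting from 0.
 */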
/*
* Flush all vcpus' PML buffer and update logged GPAs to dirty_bitmap.
* Called before reporting dirty_bitmap to userspace.
*/
static void kvm_flush_pml_buffers(struct kvm *kvm)
{
int i;
struct kvm_vcpu *vcpu;
/*
 * We only need to kick each vcpu out of guest mode here, as the PML
 * buffer is flushed at the beginning of every VMEXIT, so only vcpus
 * currently running in guest mode can have unflushed GPAs in their
 * PML buffers.
 */
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vcpu_kick(vcpu);
}
static void vmx_dump_sel(char *name, uint32_t sel)
{
pr_err("%s sel=0x%04x, attr=0x%05x, limit=0x%08x, base=0x%016lx\n",
name, vmcs_read32(sel),
vmcs_read32(sel + GUEST_ES_AR_BYTES - GUEST_ES_SELECTOR),
vmcs_read32(sel + GUEST_ES_LIMIT - GUEST_ES_SELECTOR),
vmcs_readl(sel + GUEST_ES_BASE - GUEST_ES_SELECTOR));
}
static void vmx_dump_dtsel(char *name, uint32_t limit)
{
pr_err("%s limit=0x%08x, base=0x%016lx\n",
name, vmcs_read32(limit),
vmcs_readl(limit + GUEST_GDTR_BASE - GUEST_GDTR_LIMIT));
}
static void dump_vmcs(void)
{
u32 vmentry_ctl = vmcs_read32(VM_ENTRY_CONTROLS);
u32 vmexit_ctl = vmcs_read32(VM_EXIT_CONTROLS);
u32 cpu_based_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
u32 pin_based_exec_ctrl = vmcs_read32(PIN_BASED_VM_EXEC_CONTROL);
u32 secondary_exec_control = 0;
unsigned long cr4 = vmcs_readl(GUEST_CR4);
u64 efer = vmcs_readl(GUEST_IA32_EFER);
int i, n;
if (cpu_has_secondary_exec_ctrls())
secondary_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
pr_err("*** Guest State ***\n");
pr_err("CR0: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
vmcs_readl(GUEST_CR0), vmcs_readl(CR0_READ_SHADOW),
vmcs_readl(CR0_GUEST_HOST_MASK));
pr_err("CR4: actual=0x%016lx, shadow=0x%016lx, gh_mask=%016lx\n",
cr4, vmcs_readl(CR4_READ_SHADOW), vmcs_readl(CR4_GUEST_HOST_MASK));
pr_err("CR3 = 0x%016lx\n", vmcs_readl(GUEST_CR3));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT) &&
(cr4 & X86_CR4_PAE) && !(efer & EFER_LMA))
{
pr_err("PDPTR0 = 0x%016lx PDPTR1 = 0x%016lx\n",
vmcs_readl(GUEST_PDPTR0), vmcs_readl(GUEST_PDPTR1));
pr_err("PDPTR2 = 0x%016lx PDPTR3 = 0x%016lx\n",
vmcs_readl(GUEST_PDPTR2), vmcs_readl(GUEST_PDPTR3));
}
pr_err("RSP = 0x%016lx RIP = 0x%016lx\n",
vmcs_readl(GUEST_RSP), vmcs_readl(GUEST_RIP));
pr_err("RFLAGS=0x%08lx DR7 = 0x%016lx\n",
vmcs_readl(GUEST_RFLAGS), vmcs_readl(GUEST_DR7));
pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
vmcs_readl(GUEST_SYSENTER_ESP),
vmcs_read32(GUEST_SYSENTER_CS), vmcs_readl(GUEST_SYSENTER_EIP));
vmx_dump_sel("CS: ", GUEST_CS_SELECTOR);
vmx_dump_sel("DS: ", GUEST_DS_SELECTOR);
vmx_dump_sel("SS: ", GUEST_SS_SELECTOR);
vmx_dump_sel("ES: ", GUEST_ES_SELECTOR);
vmx_dump_sel("FS: ", GUEST_FS_SELECTOR);
vmx_dump_sel("GS: ", GUEST_GS_SELECTOR);
vmx_dump_dtsel("GDTR:", GUEST_GDTR_LIMIT);
vmx_dump_sel("LDTR:", GUEST_LDTR_SELECTOR);
vmx_dump_dtsel("IDTR:", GUEST_IDTR_LIMIT);
vmx_dump_sel("TR: ", GUEST_TR_SELECTOR);
if ((vmexit_ctl & (VM_EXIT_SAVE_IA32_PAT | VM_EXIT_SAVE_IA32_EFER)) ||
(vmentry_ctl & (VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_IA32_EFER)))
pr_err("EFER = 0x%016llx PAT = 0x%016lx\n",
efer, vmcs_readl(GUEST_IA32_PAT));
pr_err("DebugCtl = 0x%016lx DebugExceptions = 0x%016lx\n",
vmcs_readl(GUEST_IA32_DEBUGCTL),
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS));
if (vmentry_ctl & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL)
pr_err("PerfGlobCtl = 0x%016lx\n",
vmcs_readl(GUEST_IA32_PERF_GLOBAL_CTRL));
if (vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS)
pr_err("BndCfgS = 0x%016lx\n", vmcs_readl(GUEST_BNDCFGS));
pr_err("Interruptibility = %08x ActivityState = %08x\n",
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO),
vmcs_read32(GUEST_ACTIVITY_STATE));
if (secondary_exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
pr_err("InterruptStatus = %04x\n",
vmcs_read16(GUEST_INTR_STATUS));
pr_err("*** Host State ***\n");
pr_err("RIP = 0x%016lx RSP = 0x%016lx\n",
vmcs_readl(HOST_RIP), vmcs_readl(HOST_RSP));
pr_err("CS=%04x SS=%04x DS=%04x ES=%04x FS=%04x GS=%04x TR=%04x\n",
vmcs_read16(HOST_CS_SELECTOR), vmcs_read16(HOST_SS_SELECTOR),
vmcs_read16(HOST_DS_SELECTOR), vmcs_read16(HOST_ES_SELECTOR),
vmcs_read16(HOST_FS_SELECTOR), vmcs_read16(HOST_GS_SELECTOR),
vmcs_read16(HOST_TR_SELECTOR));
pr_err("FSBase=%016lx GSBase=%016lx TRBase=%016lx\n",
vmcs_readl(HOST_FS_BASE), vmcs_readl(HOST_GS_BASE),
vmcs_readl(HOST_TR_BASE));
pr_err("GDTBase=%016lx IDTBase=%016lx\n",
vmcs_readl(HOST_GDTR_BASE), vmcs_readl(HOST_IDTR_BASE));
pr_err("CR0=%016lx CR3=%016lx CR4=%016lx\n",
vmcs_readl(HOST_CR0), vmcs_readl(HOST_CR3),
vmcs_readl(HOST_CR4));
pr_err("Sysenter RSP=%016lx CS:RIP=%04x:%016lx\n",
vmcs_readl(HOST_IA32_SYSENTER_ESP),
vmcs_read32(HOST_IA32_SYSENTER_CS),
vmcs_readl(HOST_IA32_SYSENTER_EIP));
if (vmexit_ctl & (VM_EXIT_LOAD_IA32_PAT | VM_EXIT_LOAD_IA32_EFER))
pr_err("EFER = 0x%016lx PAT = 0x%016lx\n",
vmcs_readl(HOST_IA32_EFER), vmcs_readl(HOST_IA32_PAT));
if (vmexit_ctl & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
pr_err("PerfGlobCtl = 0x%016lx\n",
vmcs_readl(HOST_IA32_PERF_GLOBAL_CTRL));
pr_err("*** Control State ***\n");
pr_err("PinBased=%08x CPUBased=%08x SecondaryExec=%08x\n",
pin_based_exec_ctrl, cpu_based_exec_ctrl, secondary_exec_control);
pr_err("EntryControls=%08x ExitControls=%08x\n", vmentry_ctl, vmexit_ctl);
pr_err("ExceptionBitmap=%08x PFECmask=%08x PFECmatch=%08x\n",
vmcs_read32(EXCEPTION_BITMAP),
vmcs_read32(PAGE_FAULT_ERROR_CODE_MASK),
vmcs_read32(PAGE_FAULT_ERROR_CODE_MATCH));
pr_err("VMEntry: intr_info=%08x errcode=%08x ilen=%08x\n",
vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
vmcs_read32(VM_ENTRY_EXCEPTION_ERROR_CODE),
vmcs_read32(VM_ENTRY_INSTRUCTION_LEN));
pr_err("VMExit: intr_info=%08x errcode=%08x ilen=%08x\n",
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_read32(VM_EXIT_INTR_ERROR_CODE),
vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
pr_err(" reason=%08x qualification=%016lx\n",
vmcs_read32(VM_EXIT_REASON), vmcs_readl(EXIT_QUALIFICATION));
pr_err("IDTVectoring: info=%08x errcode=%08x\n",
vmcs_read32(IDT_VECTORING_INFO_FIELD),
vmcs_read32(IDT_VECTORING_ERROR_CODE));
pr_err("TSC Offset = 0x%016lx\n", vmcs_readl(TSC_OFFSET));
if (secondary_exec_control & SECONDARY_EXEC_TSC_SCALING)
pr_err("TSC Multiplier = 0x%016lx\n",
vmcs_readl(TSC_MULTIPLIER));
if (cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW)
pr_err("TPR Threshold = 0x%02x\n", vmcs_read32(TPR_THRESHOLD));
if (pin_based_exec_ctrl & PIN_BASED_POSTED_INTR)
pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
pr_err("EPT pointer = 0x%016lx\n", vmcs_readl(EPT_POINTER));
n = vmcs_read32(CR3_TARGET_COUNT);
/* Each iteration prints two targets, so advance by two entries. */
for (i = 0; i + 1 < n; i += 2)
	pr_err("CR3 target%u=%016lx target%u=%016lx\n",
	       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
	       i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
if (i < n)
	pr_err("CR3 target%u=%016lx\n",
	       i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
pr_err("PLE Gap=%08x Window=%08x\n",
vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
if (secondary_exec_control & SECONDARY_EXEC_ENABLE_VPID)
pr_err("Virtual processor ID = 0x%04x\n",
vmcs_read16(VIRTUAL_PROCESSOR_ID));
}
/*
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
*/
static int vmx_handle_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
/*
 * Flush the logged GPAs out of the PML buffer so that dirty_bitmap is
 * up to date. A further benefit: in kvm_vm_ioctl_get_dirty_log, before
 * querying dirty_bitmap, we only need to kick all vcpus out of guest
 * mode, since a vcpu in root mode must already have flushed its PML
 * buffer.
 */
if (enable_pml)
vmx_flush_pml_buffer(vcpu);
/* If guest state is invalid, start emulating */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
nested_vmx_vmexit(vcpu, exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
}
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
dump_vmcs();
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= exit_reason;
return 0;
}
if (unlikely(vmx->fail)) {
vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
vcpu->run->fail_entry.hardware_entry_failure_reason
= vmcs_read32(VM_INSTRUCTION_ERROR);
return 0;
}
/*
 * Note:
 * Do not try to fix EXIT_REASON_EPT_MISCONFIG if it is caused by a
 * delivery event, since it indicates the guest is accessing MMIO.
 * The vm-exit can be triggered again after returning to the guest,
 * which would cause an infinite loop.
 */
if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
(exit_reason != EXIT_REASON_EXCEPTION_NMI &&
exit_reason != EXIT_REASON_EPT_VIOLATION &&
exit_reason != EXIT_REASON_TASK_SWITCH)) {
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = vectoring_info;
vcpu->run->internal.data[1] = exit_reason;
return 0;
}
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
!(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
get_vmcs12(vcpu))))) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->soft_vnmi_blocked = 0;
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
vcpu->arch.nmi_pending) {
/*
 * This CPU doesn't support us in finding the end of an
 * NMI-blocked window if the guest runs with IRQs
 * disabled. So we pull the trigger after 1 s of
 * futile waiting, but inform the user about this.
 */
printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
"state on VCPU %d after 1 s timeout\n",
__func__, vcpu->vcpu_id);
vmx->soft_vnmi_blocked = 0;
}
}
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
else {
WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
if (is_guest_mode(vcpu) &&
nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
return;
if (irr == -1 || tpr < irr) {
vmcs_write32(TPR_THRESHOLD, 0);
return;
}
vmcs_write32(TPR_THRESHOLD, irr);
}
static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
u32 sec_exec_control;
/*
 * There is no point in enabling virtualized x2APIC mode
 * without APICv enabled.
 */
if (!cpu_has_vmx_virtualize_x2apic_mode() ||
!vmx_cpu_uses_apicv(vcpu))
return;
if (!cpu_need_tpr_shadow(vcpu))
return;
sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
if (set) {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
} else {
sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
vmx_set_msr_bitmap(vcpu);
}
static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
* Currently we do not handle the nested case where L2 has an
* APIC access page of its own; that page is still pinned.
* Hence, we skip the case where the VCPU is in guest mode _and_
* L1 prepared an APIC access page for L2.
*
* For the case where L1 and L2 share the same APIC access page
* (flexpriority=Y but SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES clear
* in the vmcs12), this function will only update either the vmcs01
* or the vmcs02. If the former, the vmcs02 will be updated by
* prepare_vmcs02. If the latter, the vmcs01 will be updated in
* the next L2->L1 exit.
*/
if (!is_guest_mode(vcpu) ||
!nested_cpu_has2(vmx->nested.current_vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
vmcs_write64(APIC_ACCESS_ADDR, hpa);
}
static void vmx_hwapic_isr_update(struct kvm *kvm, int isr)
{
u16 status;
u8 old;
if (isr == -1)
isr = 0;
status = vmcs_read16(GUEST_INTR_STATUS);
old = status >> 8;
if (isr != old) {
status &= 0xff;
status |= isr << 8;
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
static void vmx_set_rvi(int vector)
{
u16 status;
u8 old;
if (vector == -1)
vector = 0;
status = vmcs_read16(GUEST_INTR_STATUS);
old = (u8)status & 0xff;
if ((u8)vector != old) {
status &= ~0xff;
status |= (u8)vector;
vmcs_write16(GUEST_INTR_STATUS, status);
}
}
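/*
 * The two helpers above update the 16-bit guest interrupt status
 * field, which packs two values:
 *
 *	bits  7:0	RVI (requesting virtual interrupt), vmx_set_rvi()
 *	bits 15:8	SVI (servicing virtual interrupt),
 *			vmx_hwapic_isr_update()
 *
 * E.g. with RVI = 0x30 and SVI = 0x80 the field reads 0x8030.
 */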
static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
if (!is_guest_mode(vcpu)) {
vmx_set_rvi(max_irr);
return;
}
if (max_irr == -1)
return;
/*
* In guest mode. If a vmexit is needed, vmx_check_nested_events
* handles it.
*/
if (nested_exit_on_intr(vcpu))
return;
/*
* Else, fall back to pre-APICv interrupt injection since L2
* is run without virtual interrupt delivery.
*/
if (!kvm_event_needs_reinjection(vcpu) &&
vmx_interrupt_allowed(vcpu)) {
kvm_queue_interrupt(vcpu, max_irr, false);
vmx_inject_irq(vcpu);
}
}
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
u64 *eoi_exit_bitmap = vcpu->arch.eoi_exit_bitmap;
if (!vmx_cpu_uses_apicv(vcpu))
return;
vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
vmcs_write64(EOI_EXIT_BITMAP1, eoi_exit_bitmap[1]);
vmcs_write64(EOI_EXIT_BITMAP2, eoi_exit_bitmap[2]);
vmcs_write64(EOI_EXIT_BITMAP3, eoi_exit_bitmap[3]);
}
static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
if (!(vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY
|| vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI))
return;
vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
exit_intr_info = vmx->exit_intr_info;
/* Handle machine checks before interrupts are enabled */
if (is_machine_check(exit_intr_info))
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
(exit_intr_info & INTR_INFO_VALID_MASK)) {
kvm_before_handle_nmi(&vmx->vcpu);
asm("int $2");
kvm_after_handle_nmi(&vmx->vcpu);
}
}
static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
{
u32 exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
/*
 * If an external interrupt exists, the IF bit is set in the
 * rflags/eflags image on the interrupt stack frame, so interrupts
 * will be re-enabled on return from the interrupt handler.
 */
if ((exit_intr_info & (INTR_INFO_VALID_MASK | INTR_INFO_INTR_TYPE_MASK))
== (INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR)) {
unsigned int vector;
unsigned long entry;
gate_desc *desc;
struct vcpu_vmx *vmx = to_vmx(vcpu);
#ifdef CONFIG_X86_64
unsigned long tmp;
#endif
vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
desc = (gate_desc *)vmx->host_idt_base + vector;
entry = gate_offset(*desc);
asm volatile(
#ifdef CONFIG_X86_64
"mov %%" _ASM_SP ", %[sp]\n\t"
"and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
"push $%c[ss]\n\t"
"push %[sp]\n\t"
#endif
"pushf\n\t"
"orl $0x200, (%%" _ASM_SP ")\n\t"
__ASM_SIZE(push) " $%c[cs]\n\t"
"call *%[entry]\n\t"
:
#ifdef CONFIG_X86_64
[sp]"=&r"(tmp)
#endif
:
[entry]"r"(entry),
[ss]"i"(__KERNEL_DS),
[cs]"i"(__KERNEL_CS)
);
} else
local_irq_enable();
}
static bool vmx_has_high_real_mode_segbase(void)
{
return enable_unrestricted_guest || emulate_invalid_guest_state;
}
static bool vmx_mpx_supported(void)
{
return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
(vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}
static bool vmx_xsaves_supported(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
SECONDARY_EXEC_XSAVES;
}
static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
{
u32 exit_intr_info;
bool unblock_nmi;
u8 vector;
bool idtv_info_valid;
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
if (cpu_has_virtual_nmis()) {
if (vmx->nmi_known_unmasked)
return;
/*
* Can't use vmx->exit_intr_info since we're not sure what
* the exit reason is.
*/
exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
/*
* SDM 3: 27.7.1.2 (September 2008)
* Re-set bit "block by NMI" before VM entry if vmexit caused by
* a guest IRET fault.
* SDM 3: 23.2.2 (September 2008)
* Bit 12 is undefined in any of the following cases:
* If the VM exit sets the valid bit in the IDT-vectoring
* information field.
* If the VM exit is due to a double fault.
*/
if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
vector != DF_VECTOR && !idtv_info_valid)
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
else
vmx->nmi_known_unmasked =
!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
& GUEST_INTR_STATE_NMI);
} else if (unlikely(vmx->soft_vnmi_blocked))
vmx->vnmi_blocked_time +=
ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
u32 idt_vectoring_info,
int instr_len_field,
int error_code_field)
{
u8 vector;
int type;
bool idtv_info_valid;
idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
if (!idtv_info_valid)
return;
kvm_make_request(KVM_REQ_EVENT, vcpu);
vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
switch (type) {
case INTR_TYPE_NMI_INTR:
vcpu->arch.nmi_injected = true;
/*
 * SDM 3: 27.7.1.2 (September 2008)
 * Clear bit "block by NMI" before VM entry if an NMI
 * delivery faulted.
 */
vmx_set_nmi_mask(vcpu, false);
break;
case INTR_TYPE_SOFT_EXCEPTION:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_HARD_EXCEPTION:
if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
u32 err = vmcs_read32(error_code_field);
kvm_requeue_exception_e(vcpu, vector, err);
} else
kvm_requeue_exception(vcpu, vector);
break;
case INTR_TYPE_SOFT_INTR:
vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field);
/* fall through */
case INTR_TYPE_EXT_INTR:
kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR);
break;
default:
break;
}
}
static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
{
__vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info,
VM_EXIT_INSTRUCTION_LEN,
IDT_VECTORING_ERROR_CODE);
}
static void vmx_cancel_injection(struct kvm_vcpu *vcpu)
{
__vmx_complete_interrupts(vcpu,
vmcs_read32(VM_ENTRY_INTR_INFO_FIELD),
VM_ENTRY_INSTRUCTION_LEN,
VM_ENTRY_EXCEPTION_ERROR_CODE);
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
}
static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
{
int i, nr_msrs;
struct perf_guest_switch_msr *msrs;
msrs = perf_guest_get_msrs(&nr_msrs);
if (!msrs)
return;
for (i = 0; i < nr_msrs; i++)
if (msrs[i].host == msrs[i].guest)
clear_atomic_switch_msr(vmx, msrs[i].msr);
else
add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
msrs[i].host);
}
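/*
 * The loop above keeps the VMX atomic-switch MSR lists minimal: when
 * host and guest agree on a perf MSR's value there is nothing to
 * switch, so the entry is dropped; otherwise hardware loads the guest
 * value on VM entry and restores the host value on VM exit. E.g. if
 * MSR_CORE_PERF_GLOBAL_CTRL differs between host and guest, it stays
 * on the lists and is swapped automatically on every entry/exit.
 */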
static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long debugctlmsr, cr4;
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
vmx->entry_time = ktime_get();
/*
 * Don't enter VMX if guest state is invalid; let the exit handler
 * start emulation until we arrive back at a valid state.
 */
if (vmx->emulation_required)
return;
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
if (vmx->nested.sync_shadow_vmcs) {
copy_vmcs12_to_shadow(vmx);
vmx->nested.sync_shadow_vmcs = false;
}
if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
cr4 = cr4_read_shadow();
if (unlikely(cr4 != vmx->host_state.vmcs_host_cr4)) {
vmcs_writel(HOST_CR4, cr4);
vmx->host_state.vmcs_host_cr4 = cr4;
}
/* When single-stepping over STI and MOV SS, we must clear the
* corresponding interruptibility bits in the guest state. Otherwise
* vmentry fails as it then expects bit 14 (BS) in pending debug
* exceptions being set, but that's not correct for the guest debugging
* case. */
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
vmx_set_interrupt_shadow(vcpu, 0);
atomic_switch_perf_msrs(vmx);
debugctlmsr = get_debugctlmsr();
vmx->__launched = vmx->loaded_vmcs->launched;
asm(
/* Store host registers */
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
"push %%" _ASM_CX " \n\t"
"cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
"je 1f \n\t"
"mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
__ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
"1: \n\t"
/* Reload cr2 if changed */
"mov %c[cr2](%0), %%" _ASM_AX " \n\t"
"mov %%cr2, %%" _ASM_DX " \n\t"
"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
"je 2f \n\t"
"mov %%" _ASM_AX", %%cr2 \n\t"
"2: \n\t"
/* Check if vmlaunch or vmresume is needed */
"cmpl $0, %c[launched](%0) \n\t"
/* Load guest registers. Don't clobber flags. */
"mov %c[rax](%0), %%" _ASM_AX " \n\t"
"mov %c[rbx](%0), %%" _ASM_BX " \n\t"
"mov %c[rdx](%0), %%" _ASM_DX " \n\t"
"mov %c[rsi](%0), %%" _ASM_SI " \n\t"
"mov %c[rdi](%0), %%" _ASM_DI " \n\t"
"mov %c[rbp](%0), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
"mov %c[r8](%0), %%r8 \n\t"
"mov %c[r9](%0), %%r9 \n\t"
"mov %c[r10](%0), %%r10 \n\t"
"mov %c[r11](%0), %%r11 \n\t"
"mov %c[r12](%0), %%r12 \n\t"
"mov %c[r13](%0), %%r13 \n\t"
"mov %c[r14](%0), %%r14 \n\t"
"mov %c[r15](%0), %%r15 \n\t"
#endif
"mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
/* Enter guest mode */
"jne 1f \n\t"
__ex(ASM_VMX_VMLAUNCH) "\n\t"
"jmp 2f \n\t"
"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
"2: "
/* Save guest registers, load host registers, keep flags */
"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
"pop %0 \n\t"
"mov %%" _ASM_AX ", %c[rax](%0) \n\t"
"mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
__ASM_SIZE(pop) " %c[rcx](%0) \n\t"
"mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
"mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
"mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
"mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
#ifdef CONFIG_X86_64
"mov %%r8, %c[r8](%0) \n\t"
"mov %%r9, %c[r9](%0) \n\t"
"mov %%r10, %c[r10](%0) \n\t"
"mov %%r11, %c[r11](%0) \n\t"
"mov %%r12, %c[r12](%0) \n\t"
"mov %%r13, %c[r13](%0) \n\t"
"mov %%r14, %c[r14](%0) \n\t"
"mov %%r15, %c[r15](%0) \n\t"
#endif
"mov %%cr2, %%" _ASM_AX " \n\t"
"mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
"pop %%" _ASM_BP "; pop %%" _ASM_DX " \n\t"
"setbe %c[fail](%0) \n\t"
".pushsection .rodata \n\t"
".global vmx_return \n\t"
"vmx_return: " _ASM_PTR " 2b \n\t"
".popsection"
: : "c"(vmx), "d"((unsigned long)HOST_RSP),
[launched]"i"(offsetof(struct vcpu_vmx, __launched)),
[fail]"i"(offsetof(struct vcpu_vmx, fail)),
[host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
[rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
[rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
[rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
[rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
[rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
[rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
#ifdef CONFIG_X86_64
[r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
[r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
[r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
[r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
[r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
[r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
[r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
[r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
#endif
[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
[wordsize]"i"(sizeof(ulong))
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rax", "rbx", "rdi", "rsi"
, "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
#else
, "eax", "ebx", "edi", "esi"
#endif
);
/* MSR_IA32_DEBUGCTLMSR is zeroed on vmexit. Restore it if needed */
if (debugctlmsr)
update_debugctlmsr(debugctlmsr);
#ifndef CONFIG_X86_64
/*
 * The sysexit path does not restore ds/es, so we must set them to
 * a reasonable value ourselves.
 *
 * We can't defer this to vmx_load_host_state() since that function
 * may be executed in interrupt context, which saves and restores
 * segments around it, nullifying its effect.
 */
loadsegment(ds, __USER_DS);
loadsegment(es, __USER_DS);
#endif
vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
| (1 << VCPU_EXREG_RFLAGS)
| (1 << VCPU_EXREG_PDPTR)
| (1 << VCPU_EXREG_SEGMENTS)
| (1 << VCPU_EXREG_CR3));
vcpu->arch.regs_dirty = 0;
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
vmx->loaded_vmcs->launched = 1;
vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
/*
 * The KVM_REQ_EVENT optimization bit is only on for one entry, and if
 * we did not inject a still-pending event to L1 now because of
 * nested_run_pending, we need to re-enable this bit.
 */
if (vmx->nested.nested_run_pending)
kvm_make_request(KVM_REQ_EVENT, vcpu);
vmx->nested.nested_run_pending = 0;
vmx_complete_atomic_exit(vmx);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
}
static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
if (vmx->loaded_vmcs == &vmx->vmcs01)
return;
cpu = get_cpu();
vmx->loaded_vmcs = &vmx->vmcs01;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (enable_pml)
vmx_destroy_pml_buffer(vmx);
free_vpid(vmx->vpid);
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
free_nested(vmx);
free_loaded_vmcs(vmx->loaded_vmcs);
kfree(vmx->guest_msrs);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, vmx);
}
static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
{
int err;
struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
int cpu;
if (!vmx)
return ERR_PTR(-ENOMEM);
vmx->vpid = allocate_vpid();
err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
if (err)
goto free_vcpu;
vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
> PAGE_SIZE);
err = -ENOMEM;
if (!vmx->guest_msrs)
	goto uninit_vcpu;
vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
if (!vmm_exclusive)
kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
loaded_vmcs_init(vmx->loaded_vmcs);
if (!vmm_exclusive)
kvm_cpu_vmxoff();
cpu = get_cpu();
vmx_vcpu_load(&vmx->vcpu, cpu);
vmx->vcpu.cpu = cpu;
err = vmx_vcpu_setup(vmx);
vmx_vcpu_put(&vmx->vcpu);
put_cpu();
if (err)
goto free_vmcs;
if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
err = alloc_apic_access_page(kvm);
if (err)
goto free_vmcs;
}
if (enable_ept) {
if (!kvm->arch.ept_identity_map_addr)
kvm->arch.ept_identity_map_addr =
VMX_EPT_IDENTITY_PAGETABLE_ADDR;
err = init_rmode_identity_map(kvm);
if (err)
goto free_vmcs;
}
if (nested) {
nested_vmx_setup_ctls_msrs(vmx);
vmx->nested.vpid02 = allocate_vpid();
}
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
vmx->nested.current_vmcs12 = NULL;
/*
 * If PML is turned on, failure to enable PML just results in failure
 * to create the vcpu, so we can simplify the PML logic (by avoiding
 * cases such as PML being enabled only on some of the guest's vcpus).
 */
if (enable_pml) {
err = vmx_create_pml_buffer(vmx);
if (err)
goto free_vmcs;
}
return &vmx->vcpu;
free_vmcs:
free_vpid(vmx->nested.vpid02);
free_loaded_vmcs(vmx->loaded_vmcs);
free_msrs:
kfree(vmx->guest_msrs);
uninit_vcpu:
kvm_vcpu_uninit(&vmx->vcpu);
free_vcpu:
free_vpid(vmx->vpid);
kmem_cache_free(kvm_vcpu_cache, vmx);
return ERR_PTR(err);
}
static void __init vmx_check_processor_compat(void *rtn)
{
struct vmcs_config vmcs_conf;
*(int *)rtn = 0;
if (setup_vmcs_config(&vmcs_conf) < 0)
*(int *)rtn = -EIO;
if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
smp_processor_id());
*(int *)rtn = -EIO;
}
}
static int get_ept_level(void)
{
return VMX_EPT_DEFAULT_GAW + 1;
}
static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
u8 cache;
u64 ipat = 0;
/*
 * For the VT-d and EPT combination:
 * 1. MMIO: always map as UC
 * 2. EPT with VT-d:
 *    a. VT-d without the snooping control feature: can't guarantee the
 *       result, so try to trust the guest.
 *    b. VT-d with the snooping control feature: the snooping control
 *       feature of the VT-d engine guarantees cache correctness. Just
 *       set it to WB to stay consistent with the host, same as item 3.
 * 3. EPT without VT-d: always map as WB and set IPAT=1 to stay
 *    consistent with host MTRRs.
 */
if (is_mmio) {
cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
if (!kvm_arch_has_noncoherent_dma(vcpu->kvm)) {
ipat = VMX_EPT_IPAT_BIT;
cache = MTRR_TYPE_WRBACK;
goto exit;
}
if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
ipat = VMX_EPT_IPAT_BIT;
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cache = MTRR_TYPE_WRBACK;
else
cache = MTRR_TYPE_UNCACHABLE;
goto exit;
}
cache = kvm_mtrr_get_guest_memory_type(vcpu, gfn);
exit:
return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
}
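/*
 * Worked example for the return value above: the EPT memory type
 * occupies bits 5:3 of the EPT PTE (VMX_EPT_MT_EPTE_SHIFT == 3) and
 * IPAT is bit 6. For MMIO, cache = MTRR_TYPE_UNCACHABLE (0) with
 * ipat = 0, so the function returns 0 (UC, guest PAT honored). For
 * the no-noncoherent-DMA case, cache = MTRR_TYPE_WRBACK (6) with IPAT
 * set, returning (6 << 3) | (1 << 6) = 0x70 (WB, guest PAT ignored).
 */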
static int vmx_get_lpage_level(void)
{
if (enable_ept && !cpu_has_vmx_ept_1g_page())
return PT_DIRECTORY_LEVEL;
else
/* Both shadow paging and EPT with 1GB-page support allow 1GB pages */
return PT_PDPE_LEVEL;
}
static void vmcs_set_secondary_exec_control(u32 new_ctl)
{
/*
* These bits in the secondary execution controls field
* are dynamic, the others are mostly based on the hypervisor
* architecture and the guest's CPUID. Do not touch the
* dynamic bits.
*/
u32 mask =
SECONDARY_EXEC_SHADOW_VMCS |
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
u32 cur_ctl = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
(new_ctl & ~mask) | (cur_ctl & mask));
}
static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 secondary_exec_ctl = vmx_secondary_exec_control(vmx);
if (vmx_rdtscp_supported()) {
bool rdtscp_enabled = guest_cpuid_has_rdtscp(vcpu);
if (!rdtscp_enabled)
secondary_exec_ctl &= ~SECONDARY_EXEC_RDTSCP;
if (nested) {
if (rdtscp_enabled)
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_RDTSCP;
else
vmx->nested.nested_vmx_secondary_ctls_high &=
~SECONDARY_EXEC_RDTSCP;
}
}
/* Exposing INVPCID only when PCID is exposed */
best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
if (vmx_invpcid_supported() &&
(!best || !(best->ebx & bit(X86_FEATURE_INVPCID)) ||
!guest_cpuid_has_pcid(vcpu))) {
secondary_exec_ctl &= ~SECONDARY_EXEC_ENABLE_INVPCID;
if (best)
best->ebx &= ~bit(X86_FEATURE_INVPCID);
}
vmcs_set_secondary_exec_control(secondary_exec_ctl);
if (static_cpu_has(X86_FEATURE_PCOMMIT) && nested) {
if (guest_cpuid_has_pcommit(vcpu))
vmx->nested.nested_vmx_secondary_ctls_high |=
SECONDARY_EXEC_PCOMMIT;
else
vmx->nested.nested_vmx_secondary_ctls_high &=
~SECONDARY_EXEC_PCOMMIT;
}
}
static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
if (func == 1 && nested)
entry->ecx |= bit(X86_FEATURE_VMX);
}
static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
u32 exit_reason;
if (fault->error_code & PFERR_RSVD_MASK)
exit_reason = EXIT_REASON_EPT_MISCONFIG;
else
exit_reason = EXIT_REASON_EPT_VIOLATION;
nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
vmcs12->guest_physical_address = fault->address;
}
/* Callbacks for nested_ept_init_mmu_context: */
static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
{
/* return the page table to be shadowed - in our case, EPT12 */
return get_vmcs12(vcpu)->ept_pointer;
}
static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
kvm_init_shadow_ept_mmu(vcpu,
to_vmx(vcpu)->nested.nested_vmx_ept_caps &
VMX_EPT_EXECUTE_ONLY_BIT);
vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}
static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
u16 error_code)
{
bool inequality, bit;
bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
inequality =
(error_code & vmcs12->page_fault_error_code_mask) !=
vmcs12->page_fault_error_code_match;
return inequality ^ bit;
}
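/*
 * The XOR above implements the rule that EB.PF inverts the sense of
 * the PFEC mask/match test; as a truth table:
 *
 *	EB.PF	masked PFEC == match	inequality	L1 vmexit?
 *	  1		yes		    0		   yes
 *	  1		no		    1		   no
 *	  0		yes		    0		   no
 *	  0		no		    1		   yes
 */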
static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
WARN_ON(!is_guest_mode(vcpu));
if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code))
nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
else
kvm_inject_page_fault(vcpu, fault);
}
static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int maxphyaddr = cpuid_maxphyaddr(vcpu);
if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
if (!PAGE_ALIGNED(vmcs12->apic_access_addr) ||
vmcs12->apic_access_addr >> maxphyaddr)
return false;
/*
* Translate L1 physical address to host physical
* address for vmcs02. Keep the page pinned, so this
* physical address remains valid. We keep a reference
* to it so we can release it later.
*/
if (vmx->nested.apic_access_page) /* shouldn't happen */
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page =
nested_get_page(vcpu, vmcs12->apic_access_addr);
}
if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
if (!PAGE_ALIGNED(vmcs12->virtual_apic_page_addr) ||
vmcs12->virtual_apic_page_addr >> maxphyaddr)
return false;
if (vmx->nested.virtual_apic_page) /* shouldn't happen */
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page =
nested_get_page(vcpu, vmcs12->virtual_apic_page_addr);
/*
* Failing the vm entry is _not_ what the processor does
* but it's basically the only possibility we have.
* We could still enter the guest if CR8 load exits are
* enabled, CR8 store exits are enabled, and virtualize APIC
* access is disabled; in this case the processor would never
* use the TPR shadow and we could simply clear the bit from
* the execution control. But such a configuration is useless,
* so let's keep the code simple.
*/
if (!vmx->nested.virtual_apic_page)
return false;
}
if (nested_cpu_has_posted_intr(vmcs12)) {
if (!IS_ALIGNED(vmcs12->posted_intr_desc_addr, 64) ||
vmcs12->posted_intr_desc_addr >> maxphyaddr)
return false;
if (vmx->nested.pi_desc_page) { /* shouldn't happen */
kunmap(vmx->nested.pi_desc_page);
nested_release_page(vmx->nested.pi_desc_page);
}
vmx->nested.pi_desc_page =
nested_get_page(vcpu, vmcs12->posted_intr_desc_addr);
if (!vmx->nested.pi_desc_page)
return false;
vmx->nested.pi_desc =
(struct pi_desc *)kmap(vmx->nested.pi_desc_page);
if (!vmx->nested.pi_desc) {
nested_release_page_clean(vmx->nested.pi_desc_page);
return false;
}
vmx->nested.pi_desc =
(struct pi_desc *)((void *)vmx->nested.pi_desc +
(unsigned long)(vmcs12->posted_intr_desc_addr &
(PAGE_SIZE - 1)));
}
return true;
}
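/*
 * Worked example for the posted-interrupt descriptor mapping above:
 * the 64-byte descriptor may sit anywhere inside its page (subject to
 * the 64-byte alignment check), so the final pointer is the kmap()'d
 * page base plus the in-page offset. For a hypothetical
 * posted_intr_desc_addr of 0x12340c0, the page at GPA 0x1234000 is
 * mapped and 0xc0 is added to the returned virtual address.
 */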
static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
{
u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (vcpu->arch.virtual_tsc_khz == 0)
return;
/* Make sure short timeouts reliably trigger an immediate vmexit.
* hrtimer_start does not guarantee this. */
if (preemption_timeout <= 1) {
vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
return;
}
preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
preemption_timeout *= 1000000;
do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
hrtimer_start(&vmx->nested.preemption_timer,
ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
}
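/*
 * Worked example for the conversion above, assuming the emulated
 * timer rate shift VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE is 5: for
 * a vmcs12 timer value of 1000 and virtual_tsc_khz = 2000000 (2 GHz),
 *
 *	ns = (1000 << 5) * 1000000 / 2000000 = 16000
 *
 * i.e. the hrtimer fires after 16 us.
 */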
static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int maxphyaddr;
u64 addr;
if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
return 0;
if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
WARN_ON(1);
return -EINVAL;
}
maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
((addr + PAGE_SIZE) >> maxphyaddr))
return -EINVAL;
return 0;
}
/*
 * Merge L0's and L1's MSR bitmaps; return false to indicate that
 * we do not want to use the hardware bitmap.
 */
static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
int msr;
struct page *page;
unsigned long *msr_bitmap;
if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
return false;
page = nested_get_page(vcpu, vmcs12->msr_bitmap);
if (!page) {
WARN_ON(1);
return false;
}
msr_bitmap = (unsigned long *)kmap(page);
if (!msr_bitmap) {
nested_release_page_clean(page);
WARN_ON(1);
return false;
}
if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
if (nested_cpu_has_apic_reg_virt(vmcs12))
for (msr = 0x800; msr <= 0x8ff; msr++)
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
msr, MSR_TYPE_R);
/* TPR is allowed */
nested_vmx_disable_intercept_for_msr(msr_bitmap,
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_R | MSR_TYPE_W);
if (nested_cpu_has_vid(vmcs12)) {
/* EOI and self-IPI are allowed */
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
nested_vmx_disable_intercept_for_msr(
msr_bitmap,
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
} else {
/*
 * Enable read intercepts for all the x2APIC
 * MSRs. We should not rely on vmcs12 to do any
 * optimizations here; it may have been modified
 * by L1.
 */
for (msr = 0x800; msr <= 0x8ff; msr++)
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
msr,
MSR_TYPE_R);
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_TASKPRI >> 4),
MSR_TYPE_W);
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_EOI >> 4),
MSR_TYPE_W);
__vmx_enable_intercept_for_msr(
vmx_msr_bitmap_nested,
APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
MSR_TYPE_W);
}
kunmap(page);
nested_release_page_clean(page);
return true;
}
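/*
 * A note on the MSR numbers used above: x2APIC registers are exposed
 * as MSRs 0x800-0x8ff, where MSR = APIC_BASE_MSR + (MMIO register
 * offset >> 4). E.g. the TPR at MMIO offset 0x80 becomes MSR 0x808,
 * and EOI at offset 0xb0 becomes MSR 0x80b.
 */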
static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
!nested_cpu_has_apic_reg_virt(vmcs12) &&
!nested_cpu_has_vid(vmcs12) &&
!nested_cpu_has_posted_intr(vmcs12))
return 0;
/*
* If virtualize x2apic mode is enabled,
* virtualize apic access must be disabled.
*/
if (nested_cpu_has_virt_x2apic_mode(vmcs12) &&
nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
return -EINVAL;
/*
* If virtual interrupt delivery is enabled,
* we must exit on external interrupts.
*/
if (nested_cpu_has_vid(vmcs12) &&
!nested_exit_on_intr(vcpu))
return -EINVAL;
/*
 * Bits 15:8 should be zero in posted_intr_nv.
 * The descriptor address has already been checked
 * in nested_get_vmcs12_pages.
 */
if (nested_cpu_has_posted_intr(vmcs12) &&
(!nested_cpu_has_vid(vmcs12) ||
!nested_exit_intr_ack_set(vcpu) ||
vmcs12->posted_intr_nv & 0xff00))
return -EINVAL;
/* tpr shadow is needed by all apicv features. */
if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
return -EINVAL;
return 0;
}
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
unsigned long count_field,
unsigned long addr_field)
{
int maxphyaddr;
u64 count, addr;
if (vmcs12_read_any(vcpu, count_field, &count) ||
vmcs12_read_any(vcpu, addr_field, &addr)) {
WARN_ON(1);
return -EINVAL;
}
if (count == 0)
return 0;
maxphyaddr = cpuid_maxphyaddr(vcpu);
if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
(addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr) {
pr_warn_ratelimited(
"nVMX: invalid MSR switch (0x%lx, %d, %llu, 0x%08llx)",
addr_field, maxphyaddr, count, addr);
return -EINVAL;
}
return 0;
}
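/*
 * Worked example for the range check above: each struct vmx_msr_entry
 * is 16 bytes (u32 index, u32 reserved, u64 value), so for
 * count = 256 the area spans addr .. addr + 256 * 16 - 1 =
 * addr + 0xfff, and both ends must lie below 1 << maxphyaddr, in
 * addition to addr itself being 16-byte aligned.
 */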
static int nested_vmx_check_msr_switch_controls(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
if (vmcs12->vm_exit_msr_load_count == 0 &&
vmcs12->vm_exit_msr_store_count == 0 &&
vmcs12->vm_entry_msr_load_count == 0)
return 0; /* Fast path */
if (nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_LOAD_COUNT,
VM_EXIT_MSR_LOAD_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_EXIT_MSR_STORE_COUNT,
VM_EXIT_MSR_STORE_ADDR) ||
nested_vmx_check_msr_switch(vcpu, VM_ENTRY_MSR_LOAD_COUNT,
VM_ENTRY_MSR_LOAD_ADDR))
return -EINVAL;
return 0;
}
static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
/* x2APIC MSR accesses are not allowed */
if (vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8)
return -EINVAL;
if (e->index == MSR_IA32_UCODE_WRITE || /* SDM Table 35-2 */
e->index == MSR_IA32_UCODE_REV)
return -EINVAL;
if (e->reserved != 0)
return -EINVAL;
return 0;
}
static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
if (e->index == MSR_FS_BASE ||
e->index == MSR_GS_BASE ||
e->index == MSR_IA32_SMM_MONITOR_CTL || /* SMM is not supported */
nested_vmx_msr_check_common(vcpu, e))
return -EINVAL;
return 0;
}
static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
struct vmx_msr_entry *e)
{
if (e->index == MSR_IA32_SMBASE || /* SMM is not supported */
nested_vmx_msr_check_common(vcpu, e))
return -EINVAL;
return 0;
}
/*
 * Load the guest's/host's MSRs at nested entry/exit.
 * Return 0 on success; on failure, return the 1-based index of the
 * failing entry.
 */
static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
u32 i;
struct vmx_msr_entry e;
struct msr_data msr;
msr.host_initiated = false;
for (i = 0; i < count; i++) {
if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
&e, sizeof(e))) {
pr_warn_ratelimited(
"%s cannot read MSR entry (%u, 0x%08llx)\n",
__func__, i, gpa + i * sizeof(e));
goto fail;
}
if (nested_vmx_load_msr_check(vcpu, &e)) {
pr_warn_ratelimited(
"%s check failed (%u, 0x%x, 0x%x)\n",
__func__, i, e.index, e.reserved);
goto fail;
}
msr.index = e.index;
msr.data = e.value;
if (kvm_set_msr(vcpu, &msr)) {
pr_warn_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, e.value);
goto fail;
}
}
return 0;
fail:
return i + 1;
}
static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
{
u32 i;
struct vmx_msr_entry e;
for (i = 0; i < count; i++) {
struct msr_data msr_info;
if (kvm_vcpu_read_guest(vcpu,
gpa + i * sizeof(e),
&e, 2 * sizeof(u32))) {
pr_warn_ratelimited(
"%s cannot read MSR entry (%u, 0x%08llx)\n",
__func__, i, gpa + i * sizeof(e));
return -EINVAL;
}
if (nested_vmx_store_msr_check(vcpu, &e)) {
pr_warn_ratelimited(
"%s check failed (%u, 0x%x, 0x%x)\n",
__func__, i, e.index, e.reserved);
return -EINVAL;
}
msr_info.host_initiated = false;
msr_info.index = e.index;
if (kvm_get_msr(vcpu, &msr_info)) {
pr_warn_ratelimited(
"%s cannot read MSR (%u, 0x%x)\n",
__func__, i, e.index);
return -EINVAL;
}
if (kvm_vcpu_write_guest(vcpu,
gpa + i * sizeof(e) +
offsetof(struct vmx_msr_entry, value),
&msr_info.data, sizeof(msr_info.data))) {
pr_warn_ratelimited(
"%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
__func__, i, e.index, msr_info.data);
return -EINVAL;
}
}
return 0;
}
/*
 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the
 * L2 guest in a way that satisfies both L1's requests and our own needs.
 * In addition to modifying the active vmcs (which is vmcs02), this
 * function also has necessary side effects, like setting various
 * vcpu->arch fields.
 */
static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exec_control;
vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
} else {
kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
}
vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
vmcs12->vm_entry_intr_info_field);
vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
vmcs12->vm_entry_exception_error_code);
vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
vmcs12->vm_entry_instruction_len);
vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
vmcs12->guest_interruptibility_info);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
vmx_set_rflags(vcpu, vmcs12->guest_rflags);
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
vmcs12->guest_pending_dbg_exceptions);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
if (nested_cpu_has_xsaves(vmcs12))
vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
exec_control = vmcs12->pin_based_vm_exec_control;
exec_control |= vmcs_config.pin_based_exec_ctrl;
exec_control &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
if (nested_cpu_has_posted_intr(vmcs12)) {
/*
* Note that we use L0's vector here and in
* vmx_deliver_nested_posted_interrupt.
*/
vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
vmx->nested.pi_pending = false;
vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
vmcs_write64(POSTED_INTR_DESC_ADDR,
page_to_phys(vmx->nested.pi_desc_page) +
(unsigned long)(vmcs12->posted_intr_desc_addr &
(PAGE_SIZE - 1)));
} else
exec_control &= ~PIN_BASED_POSTED_INTR;
vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, exec_control);
vmx->nested.preemption_timer_expired = false;
if (nested_cpu_has_preemption_timer(vmcs12))
vmx_start_preemption_timer(vcpu);
/*
* Whether page-faults are trapped is determined by a combination of
* 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF.
* If enable_ept, L0 doesn't care about page faults and we should
* set all of these to L1's desires. However, if !enable_ept, L0 does
* care about (at least some) page faults, and because it is not easy
* (if at all possible?) to merge L0 and L1's desires, we simply ask
* to exit on each and every L2 page fault. This is done by setting
* MASK=MATCH=0 and (see below) EB.PF=1.
* Note that below we don't need special code to set EB.PF beyond the
* "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
* vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
* !enable_ept, EB.PF is 1, so the "or" will always be 1.
*
* A problem with this approach (when !enable_ept) is that L1 may be
* injected with more page faults than it asked for. This could have
* caused problems, but in practice existing hypervisors don't care.
* To fix this, we will need to emulate the PFEC checking (on the L1
* page tables), using walk_addr(), when injecting PFs to L1.
*/
vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK,
enable_ept ? vmcs12->page_fault_error_code_mask : 0);
vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH,
enable_ept ? vmcs12->page_fault_error_code_match : 0);
if (cpu_has_secondary_exec_ctrls()) {
exec_control = vmx_secondary_exec_control(vmx);
/* Take the following fields only from vmcs12 */
exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_RDTSCP |
SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
SECONDARY_EXEC_APIC_REGISTER_VIRT |
SECONDARY_EXEC_PCOMMIT);
if (nested_cpu_has(vmcs12,
CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
exec_control |= vmcs12->secondary_vm_exec_control;
if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
/*
* If translation failed, no matter: This feature asks
* to exit when accessing the given address, and if it
* can never be accessed, this feature won't do
* anything anyway.
*/
if (!vmx->nested.apic_access_page)
exec_control &=
~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
else
vmcs_write64(APIC_ACCESS_ADDR,
page_to_phys(vmx->nested.apic_access_page));
} else if (!(nested_cpu_has_virt_x2apic_mode(vmcs12)) &&
cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
kvm_vcpu_reload_apic_access_page(vcpu);
}
if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) {
vmcs_write64(EOI_EXIT_BITMAP0,
vmcs12->eoi_exit_bitmap0);
vmcs_write64(EOI_EXIT_BITMAP1,
vmcs12->eoi_exit_bitmap1);
vmcs_write64(EOI_EXIT_BITMAP2,
vmcs12->eoi_exit_bitmap2);
vmcs_write64(EOI_EXIT_BITMAP3,
vmcs12->eoi_exit_bitmap3);
vmcs_write16(GUEST_INTR_STATUS,
vmcs12->guest_intr_status);
}
vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
}
/*
 * Set host state according to L0's settings (vmcs12 is irrelevant here).
 * Some constant fields are set here by vmx_set_constant_host_state().
 * Other fields are different per CPU, and will be set later when
 * vmx_vcpu_load() is called, and when vmx_save_host_state() is called.
 */
vmx_set_constant_host_state(vmx);
/*
* HOST_RSP is normally set correctly in vmx_vcpu_run() just before
* entry, but only if the current (host) sp changed from the value
* we wrote last (vmx->host_rsp). This cache is no longer relevant
* if we switch vmcs, and rather than hold a separate cache per vmcs,
* here we just force the write to happen on entry.
*/
vmx->host_rsp = 0;
exec_control = vmx_exec_control(vmx); /* L0's desires */
exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
exec_control &= ~CPU_BASED_TPR_SHADOW;
exec_control |= vmcs12->cpu_based_vm_exec_control;
if (exec_control & CPU_BASED_TPR_SHADOW) {
vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
page_to_phys(vmx->nested.virtual_apic_page));
vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
}
if (cpu_has_vmx_msr_bitmap() &&
exec_control & CPU_BASED_USE_MSR_BITMAPS) {
nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
/* MSR_BITMAP will be set by following vmx_set_efer. */
} else
exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
/*
* Merging of IO bitmap not currently supported.
* Rather, exit every time.
*/
exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
exec_control |= CPU_BASED_UNCOND_IO_EXITING;
vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
/* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
* bitwise-or of what L1 wants to trap for L2, and what we want to
* trap. Note that CR0.TS also needs updating - we do this later.
*/
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
/* L2->L1 exit controls are emulated - the hardware exit is to L0 so
* we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
* bits are further modified by vmx_set_efer() below.
*/
vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
/* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
* emulated by vmx_set_efer(), below.
*/
vm_entry_controls_init(vmx,
(vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
vcpu->arch.pat = vmcs12->guest_ia32_pat;
} else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
set_cr4_guest_host_mask(vmx);
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)
vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
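/*
* TSC offsets nest: while L2 runs, the hardware applies L0's offset for
* L1 (vmcs01_tsc_offset) plus, if L1 enabled TSC offsetting, the offset
* L1 requested for L2 (vmcs12->tsc_offset).
*/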
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
else
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
if (enable_vpid) {
/*
* There is no direct mapping between vpid02 and vpid12: the
* vpid02 is per-vCPU for L0 and is reused, while a change of
* vpid12 triggers one invvpid during nested vmentry.
* The vpid12 is allocated by L1 for L2, so it will not
* influence the global bitmap (used for vpid01 and vpid02
* allocation) even if L1 spawns a lot of nested vCPUs.
*/
if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
vmx->nested.last_vpid = vmcs12->virtual_processor_id;
__vmx_flush_tlb(vcpu, to_vmx(vcpu)->nested.vpid02);
}
} else {
vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
vmx_flush_tlb(vcpu);
}
}
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
}
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->guest_ia32_efer;
else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
else
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
/* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
vmx_set_efer(vcpu, vcpu->arch.efer);
/*
* This sets GUEST_CR0 to vmcs12->guest_cr0, with possibly a modified
* TS bit (for lazy fpu) and bits which we consider mandatory enabled.
* The CR0_READ_SHADOW is what L2 should have expected to read given
* the specifications by L1; it's not enough to take
* vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
* have more bits than L1 expected.
*/
vmx_set_cr0(vcpu, vmcs12->guest_cr0);
vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
vmx_set_cr4(vcpu, vmcs12->guest_cr4);
vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
/* shadow page tables on either EPT or shadow page tables */
kvm_set_cr3(vcpu, vmcs12->guest_cr3);
kvm_mmu_reset_context(vcpu);
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
/*
* L1 may access the L2's PDPTR, so save them to construct vmcs12
*/
if (enable_ept) {
vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
}
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
}
/*
* nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
* for running an L2 nested guest.
*/
static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
{
struct vmcs12 *vmcs12;
struct vcpu_vmx *vmx = to_vmx(vcpu);
int cpu;
struct loaded_vmcs *vmcs02;
bool ia32e;
u32 msr_entry_idx;
if (!nested_vmx_check_permission(vcpu) ||
!nested_vmx_check_vmcs12(vcpu))
return 1;
skip_emulated_instruction(vcpu);
vmcs12 = get_vmcs12(vcpu);
if (enable_shadow_vmcs)
copy_shadow_to_vmcs12(vmx);
/*
* The nested entry process starts with enforcing various prerequisites
* on vmcs12 as required by the Intel SDM, acting appropriately when
* they fail: As the SDM explains, some conditions should cause the
* instruction to fail, while others will cause the instruction to seem
* to succeed, but return an EXIT_REASON_INVALID_STATE.
* To speed up the normal (success) code path, we should avoid checking
* for misconfigurations which will anyway be caught by the processor
* when using the merged vmcs02.
*/
if (vmcs12->launch_state == launch) {
nested_vmx_failValid(vcpu,
launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
: VMXERR_VMRESUME_NONLAUNCHED_VMCS);
return 1;
}
if (vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (nested_vmx_check_apicv_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (nested_vmx_check_msr_switch_controls(vcpu, vmcs12)) {
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
vmx->nested.nested_vmx_true_procbased_ctls_low,
vmx->nested.nested_vmx_procbased_ctls_high) ||
!vmx_control_verify(vmcs12->secondary_vm_exec_control,
vmx->nested.nested_vmx_secondary_ctls_low,
vmx->nested.nested_vmx_secondary_ctls_high) ||
!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
vmx->nested.nested_vmx_pinbased_ctls_low,
vmx->nested.nested_vmx_pinbased_ctls_high) ||
!vmx_control_verify(vmcs12->vm_exit_controls,
vmx->nested.nested_vmx_true_exit_ctls_low,
vmx->nested.nested_vmx_exit_ctls_high) ||
!vmx_control_verify(vmcs12->vm_entry_controls,
vmx->nested.nested_vmx_true_entry_ctls_low,
vmx->nested.nested_vmx_entry_ctls_high))
{
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
return 1;
}
if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
nested_vmx_failValid(vcpu,
VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
return 1;
}
if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
return 1;
}
if (vmcs12->vmcs_link_pointer != -1ull) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_VMCS_LINK_PTR);
return 1;
}
/*
* If the load IA32_EFER VM-entry control is 1, the following checks
* are performed on the field for the IA32_EFER MSR:
* - Bits reserved in the IA32_EFER MSR must be 0.
* - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
* the IA-32e mode guest VM-exit control. It must also be identical
* to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
* CR0.PG) is 1.
*/
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
((vmcs12->guest_cr0 & X86_CR0_PG) &&
ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
return 1;
}
}
/*
* If the load IA32_EFER VM-exit control is 1, bits reserved in the
* IA32_EFER MSR must be 0 in the field for that register. In addition,
* the values of the LMA and LME bits in the field must each be that of
* the host address-space size VM-exit control.
*/
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
ia32e = (vmcs12->vm_exit_controls &
VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
return 1;
}
}
/*
* We're finally done with prerequisite checking, and can start with
* the nested entry.
*/
vmcs02 = nested_get_current_vmcs02(vmx);
if (!vmcs02)
return -ENOMEM;
enter_guest_mode(vcpu);
vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
cpu = get_cpu();
vmx->loaded_vmcs = vmcs02;
vmx_vcpu_put(vcpu);
vmx_vcpu_load(vcpu, cpu);
vcpu->cpu = cpu;
put_cpu();
vmx_segment_cache_clear(vmx);
prepare_vmcs02(vcpu, vmcs12);
msr_entry_idx = nested_vmx_load_msr(vcpu,
vmcs12->vm_entry_msr_load_addr,
vmcs12->vm_entry_msr_load_count);
if (msr_entry_idx) {
leave_guest_mode(vcpu);
vmx_load_vmcs01(vcpu);
nested_vmx_entry_failure(vcpu, vmcs12,
EXIT_REASON_MSR_LOAD_FAIL, msr_entry_idx);
return 1;
}
vmcs12->launch_state = 1;
if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT)
return kvm_vcpu_halt(vcpu);
vmx->nested.nested_run_pending = 1;
/*
* Note no nested_vmx_succeed or nested_vmx_fail here. At this point
* we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
* returned as far as L1 is concerned. It will only return (and set
* the success flag) when L2 exits (see nested_vmx_vmexit()).
*/
return 1;
}
/*
* On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
* because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
* This function returns the new value we should put in vmcs12.guest_cr0.
* It's not enough to just return the vmcs02 GUEST_CR0. Rather,
* 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
* available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
* didn't trap the bit, because if L1 did, so would L0).
* 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
* been modified by L2, and L1 knows it. So just leave the old value of
* the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
* isn't relevant, because if L0 traps this bit it can set it to anything.
* 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
* changed these bits, and therefore they need to be updated, but L0
* didn't necessarily allow them to be changed in GUEST_CR0 - and rather
* put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
*/
static inline unsigned long
vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
return
/*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
/*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
/*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
vcpu->arch.cr0_guest_owned_bits));
}
static inline unsigned long
vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
{
return
/*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
/*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
/*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
vcpu->arch.cr4_guest_owned_bits));
}
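/*
* Record an event that was being (re)injected into L2 into vmcs12's
* IDT-vectoring information fields, so that L1 can re-inject it when it
* resumes L2.
*/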
static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
u32 idt_vectoring;
unsigned int nr;
if (vcpu->arch.exception.pending && vcpu->arch.exception.reinject) {
nr = vcpu->arch.exception.nr;
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
if (kvm_exception_is_soft(nr)) {
vmcs12->vm_exit_instruction_len =
vcpu->arch.event_exit_inst_len;
idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
} else
idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
if (vcpu->arch.exception.has_error_code) {
idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
vmcs12->idt_vectoring_error_code =
vcpu->arch.exception.error_code;
}
vmcs12->idt_vectoring_info_field = idt_vectoring;
} else if (vcpu->arch.nmi_injected) {
vmcs12->idt_vectoring_info_field =
INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
} else if (vcpu->arch.interrupt.pending) {
nr = vcpu->arch.interrupt.nr;
idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
if (vcpu->arch.interrupt.soft) {
idt_vectoring |= INTR_TYPE_SOFT_INTR;
vmcs12->vm_entry_instruction_len =
vcpu->arch.event_exit_inst_len;
} else
idt_vectoring |= INTR_TYPE_EXT_INTR;
vmcs12->idt_vectoring_info_field = idt_vectoring;
}
}
static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vmx->nested.preemption_timer_expired) {
if (vmx->nested.nested_run_pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
return 0;
}
if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
if (vmx->nested.nested_run_pending ||
vcpu->arch.interrupt.pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
NMI_VECTOR | INTR_TYPE_NMI_INTR |
INTR_INFO_VALID_MASK, 0);
/*
* The NMI-triggered VM exit counts as injection:
* clear this one and block further NMIs.
*/
vcpu->arch.nmi_pending = 0;
vmx_set_nmi_mask(vcpu, true);
return 0;
}
if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
nested_exit_on_intr(vcpu)) {
if (vmx->nested.nested_run_pending)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
return 0;
}
return vmx_complete_nested_posted_interrupt(vcpu);
}
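/*
* Convert the time remaining on the host hrtimer back into guest
* preemption-timer units: ns * virtual_tsc_khz / 10^6 gives TSC ticks,
* and the emulated timer counts in units of
* 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE TSC ticks, hence the final
* right shift.
*/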
static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
{
ktime_t remaining =
hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
u64 value;
if (ktime_to_ns(remaining) <= 0)
return 0;
value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
do_div(value, 1000000);
return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
}
/*
* prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
* and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
* and this function updates it to reflect the changes to the guest state while
* L2 was running (and perhaps made some exits which were handled directly by L0
* without going back to L1), and to reflect the exit reason.
* Note that we do not have to copy here all VMCS fields, just those that
* could have changed by the L2 guest or the exit - i.e., the guest-state and
* exit-information fields only. Other fields are modified by L1 with VMWRITE,
* which already writes to vmcs12 directly.
*/
static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
u32 exit_reason, u32 exit_intr_info,
unsigned long exit_qualification)
{
/* update guest state fields: */
vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
vmcs12->guest_interruptibility_info =
vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
else
vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
if (nested_cpu_has_preemption_timer(vmcs12)) {
if (vmcs12->vm_exit_controls &
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER)
vmcs12->vmx_preemption_timer_value =
vmx_get_preemption_timer_value(vcpu);
hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
}
/*
* In some cases (usually, nested EPT), L2 is allowed to change its
* own CR3 without exiting. If it has changed it, we must keep it.
* Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
* by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
*
* Additionally, restore L2's PDPTR to vmcs12.
*/
if (enable_ept) {
vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
}
if (nested_cpu_has_vid(vmcs12))
vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
(vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
}
/* TODO: These cannot have changed unless we have MSR bitmaps and
* the relevant bit asks not to trap the change */
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
vmcs12->guest_ia32_efer = vcpu->arch.efer;
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
if (vmx_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
if (nested_cpu_has_xsaves(vmcs12))
vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
/* update exit information fields: */
vmcs12->vm_exit_reason = exit_reason;
vmcs12->exit_qualification = exit_qualification;
vmcs12->vm_exit_intr_info = exit_intr_info;
if ((vmcs12->vm_exit_intr_info &
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
(INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
vmcs12->vm_exit_intr_error_code =
vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
vmcs12->idt_vectoring_info_field = 0;
vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
/* vm_entry_intr_info_field is cleared on exit. Emulate this
* instead of reading the real value. */
vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
/*
* Transfer the event that L0 or L1 may have wanted to inject into
* L2 to IDT_VECTORING_INFO_FIELD.
*/
vmcs12_save_pending_event(vcpu, vmcs12);
}
/*
* Drop what we picked up for L2 via vmx_complete_interrupts. It is
* preserved above and would only end up incorrectly in L1.
*/
vcpu->arch.nmi_injected = false;
kvm_clear_exception_queue(vcpu);
kvm_clear_interrupt_queue(vcpu);
}
/*
* Part of what we need to do when the nested L2 guest exits and we want to
* run its L1 parent, is to reset L1's guest state to the host state specified
* in vmcs12.
* This function is to be called not only on normal nested exit, but also on
* a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
* Failures During or After Loading Guest State").
* This function should be called when the active VMCS is L1's (vmcs01).
*/
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
vcpu->arch.efer |= (EFER_LMA | EFER_LME);
else
vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
vmx_set_efer(vcpu, vcpu->arch.efer);
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->host_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->host_rip);
vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
/*
* Note that calling vmx_set_cr0 is important, even if cr0 hasn't
* actually changed, because it depends on the current state of
* fpu_active (which may have changed).
* Note that vmx_set_cr0 refers to efer set above.
*/
vmx_set_cr0(vcpu, vmcs12->host_cr0);
/*
* If we did fpu_activate()/fpu_deactivate() during L2's run, we need
* to apply the same changes to L1's vmcs. We just set cr0 correctly,
* but we also need to update cr0_guest_host_mask and exception_bitmap.
*/
update_exception_bitmap(vcpu);
vcpu->arch.cr0_guest_owned_bits = (vcpu->fpu_active ? X86_CR0_TS : 0);
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
/*
* Note that CR4_GUEST_HOST_MASK is already set in the original vmcs01
* (KVM doesn't change it) - no reason to call set_cr4_guest_host_mask().
*/
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
nested_ept_uninit_mmu_context(vcpu);
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
if (!enable_ept)
vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
if (enable_vpid) {
/*
* Trivially support vpid by letting L2s share their parent
* L1's vpid. TODO: move to a more elaborate solution, giving
* each L2 its own vpid and exposing the vpid feature to L1.
*/
vmx_flush_tlb(vcpu);
}
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
vmcs_write64(GUEST_BNDCFGS, 0);
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
vcpu->arch.pat = vmcs12->host_ia32_pat;
}
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
vmcs12->host_ia32_perf_global_ctrl);
/*
* Set L1 segment info according to Intel SDM 27.5.2,
* "Loading Host Segment and Descriptor-Table Registers".
*/
seg = (struct kvm_segment) {
.base = 0,
.limit = 0xFFFFFFFF,
.selector = vmcs12->host_cs_selector,
.type = 11,
.present = 1,
.s = 1,
.g = 1
};
if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
seg.l = 1;
else
seg.db = 1;
vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
seg = (struct kvm_segment) {
.base = 0,
.limit = 0xFFFFFFFF,
.type = 3,
.present = 1,
.s = 1,
.db = 1,
.g = 1
};
seg.selector = vmcs12->host_ds_selector;
vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
seg.selector = vmcs12->host_es_selector;
vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
seg.selector = vmcs12->host_ss_selector;
vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
seg.selector = vmcs12->host_fs_selector;
seg.base = vmcs12->host_fs_base;
vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
seg.selector = vmcs12->host_gs_selector;
seg.base = vmcs12->host_gs_base;
vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
seg = (struct kvm_segment) {
.base = vmcs12->host_tr_base,
.limit = 0x67,
.selector = vmcs12->host_tr_selector,
.type = 11,
.present = 1
};
vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
kvm_set_dr(vcpu, 7, 0x400);
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
if (cpu_has_vmx_msr_bitmap())
vmx_set_msr_bitmap(vcpu);
if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
vmcs12->vm_exit_msr_load_count))
nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
}
/*
* Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
* and modify vmcs12 to make it see what it would expect to see there if
* L2 was its real guest. Must only be called when in L2 (is_guest_mode())
*/
static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
u32 exit_intr_info,
unsigned long exit_qualification)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
leave_guest_mode(vcpu);
prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
exit_qualification);
if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
vmx_load_vmcs01(vcpu);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
&& nested_exit_intr_ack_set(vcpu)) {
int irq = kvm_cpu_get_interrupt(vcpu);
WARN_ON(irq < 0);
vmcs12->vm_exit_intr_info = irq |
INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
}
trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
vmcs12->exit_qualification,
vmcs12->idt_vectoring_info_field,
vmcs12->vm_exit_intr_info,
vmcs12->vm_exit_intr_error_code,
KVM_ISA_VMX);
vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
vmx_segment_cache_clear(vmx);
/* if no vmcs02 cache requested, remove the one we used */
if (VMCS02_POOL_SIZE == 0)
nested_free_vmcs02(vmx, vmx->nested.current_vmptr);
load_vmcs12_host_state(vcpu, vmcs12);
/* Update TSC_OFFSET if TSC was changed while L2 ran */
vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
/* This is needed for same reason as it was needed in prepare_vmcs02 */
vmx->host_rsp = 0;
/* Unpin physical memory we referred to in vmcs02 */
if (vmx->nested.apic_access_page) {
nested_release_page(vmx->nested.apic_access_page);
vmx->nested.apic_access_page = NULL;
}
if (vmx->nested.virtual_apic_page) {
nested_release_page(vmx->nested.virtual_apic_page);
vmx->nested.virtual_apic_page = NULL;
}
if (vmx->nested.pi_desc_page) {
kunmap(vmx->nested.pi_desc_page);
nested_release_page(vmx->nested.pi_desc_page);
vmx->nested.pi_desc_page = NULL;
vmx->nested.pi_desc = NULL;
}
/*
* While we were running in L2, the mmu_notifier may have forced a reload
* of the APIC-access page's hpa for the L2 vmcs. Reload it for L1 before
* re-entering L1.
*/
kvm_vcpu_reload_apic_access_page(vcpu);
/*
* Exiting from L2 to L1, we're now back to L1 which thinks it just
* finished a VMLAUNCH or VMRESUME instruction, so we need to set the
* success or failure flag accordingly.
*/
if (unlikely(vmx->fail)) {
vmx->fail = 0;
nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
} else
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
vmx->nested.sync_shadow_vmcs = true;
/* in case we halted in L2 */
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
}
/*
* Forcibly leave nested mode in order to be able to reset the VCPU later on.
*/
static void vmx_leave_nested(struct kvm_vcpu *vcpu)
{
if (is_guest_mode(vcpu))
nested_vmx_vmexit(vcpu, -1, 0, 0);
free_nested(to_vmx(vcpu));
}
/*
* L1's failure to enter L2 is a subset of a normal exit, as explained in
* 23.7 "VM-entry failures during or after loading guest state" (this also
* lists the acceptable exit-reason and exit-qualification parameters).
* It should only be called before L2 has actually run successfully, and when
* vmcs01 is current (it doesn't leave_guest_mode() or switch VMCSs).
*/
static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12,
u32 reason, unsigned long qualification)
{
load_vmcs12_host_state(vcpu, vmcs12);
vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
vmcs12->exit_qualification = qualification;
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
}
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
return X86EMUL_CONTINUE;
}
static void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
if (ple_gap)
shrink_ple_window(vcpu);
}
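/*
* With PML-based dirty logging, large pages are write-protected (so they
* get split and logged via write faults) while dirty bits are cleared on
* 4K leaf SPTEs, so 4K writes are logged through PML instead.
*/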
static void vmx_slot_enable_log_dirty(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
kvm_mmu_slot_leaf_clear_dirty(kvm, slot);
kvm_mmu_slot_largepage_remove_write_access(kvm, slot);
}
static void vmx_slot_disable_log_dirty(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
kvm_mmu_slot_set_dirty(kvm, slot);
}
static void vmx_flush_log_dirty(struct kvm *kvm)
{
kvm_flush_pml_buffers(kvm);
}
static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *memslot,
gfn_t offset, unsigned long mask)
{
kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
}
/*
* This routine does the following things for a vCPU which is going
* to be blocked, if VT-d PI is enabled.
* - Store the vCPU to the wakeup list, so when interrupts happen
* we can find the right vCPU to wake up.
* - Change the Posted-interrupt descriptor as below:
* 'NDST' <-- vcpu->pre_pcpu
* 'NV' <-- POSTED_INTR_WAKEUP_VECTOR
* - If 'ON' is set during this process, at least one interrupt is
*   posted for this vCPU and we cannot block it; in that case,
*   return 1. Otherwise, return 0.
*
*/
static int vmx_pre_block(struct kvm_vcpu *vcpu)
{
unsigned long flags;
unsigned int dest;
struct pi_desc old, new;
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
return 0;
vcpu->pre_pcpu = vcpu->cpu;
spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_add_tail(&vcpu->blocked_vcpu_list,
&per_cpu(blocked_vcpu_on_cpu,
vcpu->pre_pcpu));
spin_unlock_irqrestore(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
do {
old.control = new.control = pi_desc->control;
/*
* We should not block the vCPU if
* an interrupt is posted for it.
*/
if (pi_test_on(pi_desc) == 1) {
spin_lock_irqsave(&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_del(&vcpu->blocked_vcpu_list);
spin_unlock_irqrestore(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
vcpu->pre_pcpu = -1;
return 1;
}
WARN((pi_desc->sn == 1),
"Warning: SN field of posted-interrupts "
"is set before blocking\n");
/*
* Since the vCPU can be preempted during this process,
* vcpu->cpu could differ from pre_pcpu. We need to set
* pre_pcpu as the destination of the wakeup notification
* event, so the wakeup handler can find the right vCPU to
* wake up if an interrupt arrives while the vCPU is blocked.
*/
dest = cpu_physical_id(vcpu->pre_pcpu);
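/*
* Per the VT-d posted-interrupt descriptor layout, xAPIC mode
* keeps the destination APIC ID in bits 15:8 of NDST, while
* x2APIC mode uses the full 32-bit ID directly.
*/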
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
/* set 'NV' to 'wakeup vector' */
new.nv = POSTED_INTR_WAKEUP_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
return 0;
}
static void vmx_post_block(struct kvm_vcpu *vcpu)
{
struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
struct pi_desc old, new;
unsigned int dest;
unsigned long flags;
if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
return;
do {
old.control = new.control = pi_desc->control;
dest = cpu_physical_id(vcpu->cpu);
if (x2apic_enabled())
new.ndst = dest;
else
new.ndst = (dest << 8) & 0xFF00;
/* Allow posting non-urgent interrupts */
new.sn = 0;
/* set 'NV' to 'notification vector' */
new.nv = POSTED_INTR_VECTOR;
} while (cmpxchg(&pi_desc->control, old.control,
new.control) != old.control);
if (vcpu->pre_pcpu != -1) {
spin_lock_irqsave(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
list_del(&vcpu->blocked_vcpu_list);
spin_unlock_irqrestore(
&per_cpu(blocked_vcpu_on_cpu_lock,
vcpu->pre_pcpu), flags);
vcpu->pre_pcpu = -1;
}
}
/*
* vmx_update_pi_irte - set IRTE for Posted-Interrupts
*
* @kvm: kvm
* @host_irq: host irq of the interrupt
* @guest_irq: gsi of the interrupt
* @set: set or unset PI
* returns 0 on success, < 0 on failure
*/
static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set)
{
struct kvm_kernel_irq_routing_entry *e;
struct kvm_irq_routing_table *irq_rt;
struct kvm_lapic_irq irq;
struct kvm_vcpu *vcpu;
struct vcpu_data vcpu_info;
int idx, ret = -EINVAL;
if (!kvm_arch_has_assigned_device(kvm) ||
!irq_remapping_cap(IRQ_POSTING_CAP))
return 0;
idx = srcu_read_lock(&kvm->irq_srcu);
irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
BUG_ON(guest_irq >= irq_rt->nr_rt_entries);
hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
if (e->type != KVM_IRQ_ROUTING_MSI)
continue;
/*
* VT-d PI cannot post multicast/broadcast interrupts to a
* vCPU, so we still use interrupt remapping for that kind
* of interrupt.
*
* For lowest-priority interrupts, we only support those
* with a single CPU as the destination, e.g. when the user
* configures the interrupts via /proc/irq or uses
* irqbalance to make them single-CPU.
*
* Full lowest-priority interrupt support will come later.
*/
kvm_set_msi_irq(e, &irq);
if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu))
continue;
vcpu_info.pi_desc_addr = __pa(vcpu_to_pi_desc(vcpu));
vcpu_info.vector = irq.vector;
trace_kvm_pi_irte_update(vcpu->vcpu_id, e->gsi,
vcpu_info.vector, vcpu_info.pi_desc_addr, set);
if (set)
ret = irq_set_vcpu_affinity(host_irq, &vcpu_info);
else {
/* suppress notification event before unposting */
pi_set_sn(vcpu_to_pi_desc(vcpu));
ret = irq_set_vcpu_affinity(host_irq, NULL);
pi_clear_sn(vcpu_to_pi_desc(vcpu));
}
if (ret < 0) {
printk(KERN_INFO "%s: failed to update PI IRTE\n",
__func__);
goto out;
}
}
ret = 0;
out:
srcu_read_unlock(&kvm->irq_srcu, idx);
return ret;
}
static struct kvm_x86_ops vmx_x86_ops = {
.cpu_has_kvm_support = cpu_has_kvm_support,
.disabled_by_bios = vmx_disabled_by_bios,
.hardware_setup = hardware_setup,
.hardware_unsetup = hardware_unsetup,
.check_processor_compatibility = vmx_check_processor_compat,
.hardware_enable = hardware_enable,
.hardware_disable = hardware_disable,
.cpu_has_accelerated_tpr = report_flexpriority,
.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
.vcpu_create = vmx_create_vcpu,
.vcpu_free = vmx_free_vcpu,
.vcpu_reset = vmx_vcpu_reset,
.prepare_guest_switch = vmx_save_host_state,
.vcpu_load = vmx_vcpu_load,
.vcpu_put = vmx_vcpu_put,
.update_db_bp_intercept = update_exception_bitmap,
.get_msr = vmx_get_msr,
.set_msr = vmx_set_msr,
.get_segment_base = vmx_get_segment_base,
.get_segment = vmx_get_segment,
.set_segment = vmx_set_segment,
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
.decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
.decache_cr3 = vmx_decache_cr3,
.decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr3 = vmx_set_cr3,
.set_cr4 = vmx_set_cr4,
.set_efer = vmx_set_efer,
.get_idt = vmx_get_idt,
.set_idt = vmx_set_idt,
.get_gdt = vmx_get_gdt,
.set_gdt = vmx_set_gdt,
.get_dr6 = vmx_get_dr6,
.set_dr6 = vmx_set_dr6,
.set_dr7 = vmx_set_dr7,
.sync_dirty_debug_regs = vmx_sync_dirty_debug_regs,
.cache_reg = vmx_cache_reg,
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
.fpu_activate = vmx_fpu_activate,
.fpu_deactivate = vmx_fpu_deactivate,
.tlb_flush = vmx_flush_tlb,
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
.set_interrupt_shadow = vmx_set_interrupt_shadow,
.get_interrupt_shadow = vmx_get_interrupt_shadow,
.patch_hypercall = vmx_patch_hypercall,
.set_irq = vmx_inject_irq,
.set_nmi = vmx_inject_nmi,
.queue_exception = vmx_queue_exception,
.cancel_injection = vmx_cancel_injection,
.interrupt_allowed = vmx_interrupt_allowed,
.nmi_allowed = vmx_nmi_allowed,
.get_nmi_mask = vmx_get_nmi_mask,
.set_nmi_mask = vmx_set_nmi_mask,
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
.cpu_uses_apicv = vmx_cpu_uses_apicv,
.load_eoi_exitmap = vmx_load_eoi_exitmap,
.hwapic_irr_update = vmx_hwapic_irr_update,
.hwapic_isr_update = vmx_hwapic_isr_update,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
.set_tss_addr = vmx_set_tss_addr,
.get_tdp_level = get_ept_level,
.get_mt_mask = vmx_get_mt_mask,
.get_exit_info = vmx_get_exit_info,
.get_lpage_level = vmx_get_lpage_level,
.cpuid_update = vmx_cpuid_update,
.rdtscp_supported = vmx_rdtscp_supported,
.invpcid_supported = vmx_invpcid_supported,
.set_supported_cpuid = vmx_set_supported_cpuid,
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.read_tsc_offset = vmx_read_tsc_offset,
.write_tsc_offset = vmx_write_tsc_offset,
.adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
.read_l1_tsc = vmx_read_l1_tsc,
.set_tdp_cr3 = vmx_set_cr3,
.check_intercept = vmx_check_intercept,
.handle_external_intr = vmx_handle_external_intr,
.mpx_supported = vmx_mpx_supported,
.xsaves_supported = vmx_xsaves_supported,
.check_nested_events = vmx_check_nested_events,
.sched_in = vmx_sched_in,
.slot_enable_log_dirty = vmx_slot_enable_log_dirty,
.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
.flush_log_dirty = vmx_flush_log_dirty,
.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
.pre_block = vmx_pre_block,
.post_block = vmx_post_block,
.pmu_ops = &intel_pmu_ops,
.update_pi_irte = vmx_update_pi_irte,
};
static int __init vmx_init(void)
{
int r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE);
if (r)
return r;
#ifdef CONFIG_KEXEC_CORE
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
crash_vmclear_local_loaded_vmcss);
#endif
return 0;
}
static void __exit vmx_exit(void)
{
#ifdef CONFIG_KEXEC_CORE
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
synchronize_rcu();
#endif
kvm_exit();
}
module_init(vmx_init)
module_exit(vmx_exit)
/*
* linux/net/sunrpc/sched.c
*
* Scheduling for synchronous and asynchronous RPC requests.
*
* Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
*
* TCP NFS related read + write fixes
* (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
*/
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/sunrpc/clnt.h>
#include "sunrpc.h"
#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_SCHED
#endif
/*
* RPC slabs and memory pools
*/
#define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8)
static struct kmem_cache *rpc_task_slabp __read_mostly;
static struct kmem_cache *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool __read_mostly;
static void rpc_async_schedule(struct work_struct *);
static void rpc_release_task(struct rpc_task *task);
static void __rpc_queue_timer_fn(unsigned long ptr);
/*
* RPC tasks sit here while waiting for conditions to improve.
*/
static struct rpc_wait_queue delay_queue;
/*
* rpciod-related stuff
*/
struct workqueue_struct *rpciod_workqueue;
/*
* Disable the timer for a given RPC task. Should be called with
* queue->lock held and bottom halves disabled, in order to avoid
* races within rpc_run_timer().
*/
static void
__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (task->tk_timeout == 0)
return;
dprintk("RPC: %5u disabling timer\n", task->tk_pid);
task->tk_timeout = 0;
list_del(&task->u.tk_wait.timer_list);
if (list_empty(&queue->timer_list.list))
del_timer(&queue->timer_list.timer);
}
static void
rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
{
queue->timer_list.expires = expires;
mod_timer(&queue->timer_list.timer, expires);
}
/*
* Set up a timer for the current task.
*/
static void
__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (!task->tk_timeout)
return;
dprintk("RPC: %5u setting alarm for %lu ms\n",
task->tk_pid, task->tk_timeout * 1000 / HZ);
task->u.tk_wait.expires = jiffies + task->tk_timeout;
if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
rpc_set_queue_timer(queue, task->u.tk_wait.expires);
list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
}
/*
* Add new request to a priority queue.
*/
static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct rpc_task *task)
{
struct list_head *q;
struct rpc_task *t;
INIT_LIST_HEAD(&task->u.tk_wait.links);
q = &queue->tasks[task->tk_priority];
if (unlikely(task->tk_priority > queue->maxpriority))
q = &queue->tasks[queue->maxpriority];
list_for_each_entry(t, q, u.tk_wait.list) {
if (t->tk_owner == task->tk_owner) {
list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
return;
}
}
list_add_tail(&task->u.tk_wait.list, q);
}
/*
* Add new request to wait queue.
*
* Swapper tasks always get inserted at the head of the queue.
* This should avoid many nasty memory deadlocks and hopefully
* improve overall performance.
* Everyone else gets appended to the queue to ensure proper FIFO behavior.
*/
static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
BUG_ON(RPC_IS_QUEUED(task));
if (RPC_IS_PRIORITY(queue))
__rpc_add_wait_queue_priority(queue, task);
else if (RPC_IS_SWAPPER(task))
list_add(&task->u.tk_wait.list, &queue->tasks[0]);
else
list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
task->tk_waitqueue = queue;
queue->qlen++;
rpc_set_queued(task);
dprintk("RPC: %5u added to queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
/*
* Remove request from a priority queue.
*/
static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
{
struct rpc_task *t;
if (!list_empty(&task->u.tk_wait.links)) {
t = list_entry(task->u.tk_wait.links.next, struct rpc_task, u.tk_wait.list);
list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
}
}
/*
* Remove request from queue.
* Note: must be called with spin lock held.
*/
static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
{
__rpc_disable_timer(queue, task);
if (RPC_IS_PRIORITY(queue))
__rpc_remove_wait_queue_priority(task);
list_del(&task->u.tk_wait.list);
queue->qlen--;
dprintk("RPC: %5u removed from queue %p \"%s\"\n",
task->tk_pid, queue, rpc_qname(queue));
}
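/*
* At priority level 'priority' the scheduler services up to 4^priority
* tasks (the 1 << (priority * 2) below) before switching to the next
* queue; see __rpc_wake_up_next_priority().
*/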
static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
{
queue->priority = priority;
queue->count = 1 << (priority * 2);
}
static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
{
queue->owner = pid;
queue->nr = RPC_BATCH_COUNT;
}
static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
{
rpc_set_waitqueue_priority(queue, queue->maxpriority);
rpc_set_waitqueue_owner(queue, 0);
}
static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
{
int i;
spin_lock_init(&queue->lock);
for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
INIT_LIST_HEAD(&queue->tasks[i]);
queue->maxpriority = nr_queues - 1;
rpc_reset_waitqueue_priority(queue);
queue->qlen = 0;
setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
INIT_LIST_HEAD(&queue->timer_list.list);
#ifdef RPC_DEBUG
queue->name = qname;
#endif
}
void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
}
EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
{
__rpc_init_priority_wait_queue(queue, qname, 1);
}
EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
{
del_timer_sync(&queue->timer_list.timer);
}
EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
static int rpc_wait_bit_killable(void *word)
{
if (fatal_signal_pending(current))
return -ERESTARTSYS;
schedule();
return 0;
}
#ifdef RPC_DEBUG
static void rpc_task_set_debuginfo(struct rpc_task *task)
{
static atomic_t rpc_pid;
task->tk_pid = atomic_inc_return(&rpc_pid);
}
#else
static inline void rpc_task_set_debuginfo(struct rpc_task *task)
{
}
#endif
static void rpc_set_active(struct rpc_task *task)
{
rpc_task_set_debuginfo(task);
set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
}
/*
* Mark an RPC call as having completed by clearing the 'active' bit
* and then waking up all tasks that were sleeping.
*/
static int rpc_complete_task(struct rpc_task *task)
{
void *m = &task->tk_runstate;
wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
unsigned long flags;
int ret;
spin_lock_irqsave(&wq->lock, flags);
clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
ret = atomic_dec_and_test(&task->tk_count);
if (waitqueue_active(wq))
__wake_up_locked_key(wq, TASK_NORMAL, &k);
spin_unlock_irqrestore(&wq->lock, flags);
return ret;
}
/*
* Allow callers to wait for completion of an RPC call
*
* Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
* to enforce taking of the wq->lock and hence avoid races with
* rpc_complete_task().
*/
int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
{
if (action == NULL)
action = rpc_wait_bit_killable;
return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
action, TASK_KILLABLE);
}
EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
/*
* Make an RPC task runnable.
*
* Note: If the task is ASYNC, this must be called with
* the spinlock held to protect the wait queue operation.
*/
static void rpc_make_runnable(struct rpc_task *task)
{
rpc_clear_queued(task);
if (rpc_test_and_set_running(task))
return;
if (RPC_IS_ASYNC(task)) {
INIT_WORK(&task->u.tk_work, rpc_async_schedule);
queue_work(rpciod_workqueue, &task->u.tk_work);
} else
wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
}
/*
* Prepare for sleeping on a wait queue.
* By always appending tasks to the list we ensure FIFO behavior.
* NB: An RPC task will only receive interrupt-driven events as long
* as it's on a wait queue.
*/
static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
task->tk_pid, rpc_qname(q), jiffies);
__rpc_add_wait_queue(q, task);
BUG_ON(task->tk_callback != NULL);
task->tk_callback = action;
__rpc_add_timer(q, task);
}
void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
rpc_action action)
{
/* We shouldn't ever put an inactive task to sleep */
BUG_ON(!RPC_IS_ACTIVATED(task));
/*
* Protect the queue operations.
*/
spin_lock_bh(&q->lock);
__rpc_sleep_on(q, task, action);
spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(rpc_sleep_on);
/**
* __rpc_do_wake_up_task - wake up a single rpc_task
* @queue: wait queue
* @task: task to be woken up
*
* Caller must hold queue->lock, and have cleared the task queued flag.
*/
static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
task->tk_pid, jiffies);
/* Has the task been executed yet? If not, we cannot wake it up! */
if (!RPC_IS_ACTIVATED(task)) {
printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
return;
}
__rpc_remove_wait_queue(queue, task);
rpc_make_runnable(task);
dprintk("RPC: __rpc_wake_up_task done\n");
}
/*
* Wake up a queued task while the queue lock is being held
*/
static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
{
if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
__rpc_do_wake_up_task(queue, task);
}
/*
* Tests whether rpc queue is empty
*/
int rpc_queue_empty(struct rpc_wait_queue *queue)
{
int res;
spin_lock_bh(&queue->lock);
res = queue->qlen;
spin_unlock_bh(&queue->lock);
return res == 0;
}
EXPORT_SYMBOL_GPL(rpc_queue_empty);
/*
* Wake up a task on a specific queue
*/
void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
{
spin_lock_bh(&queue->lock);
rpc_wake_up_task_queue_locked(queue, task);
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
/*
* Wake up the next task on a priority queue.
*/
static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queue)
{
struct list_head *q;
struct rpc_task *task;
/*
* Service a batch of tasks from a single owner.
*/
q = &queue->tasks[queue->priority];
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
if (queue->owner == task->tk_owner) {
if (--queue->nr)
goto out;
list_move_tail(&task->u.tk_wait.list, q);
}
/*
* Check if we need to switch queues.
*/
if (--queue->count)
goto new_owner;
}
/*
* Service the next queue.
*/
do {
if (q == &queue->tasks[0])
q = &queue->tasks[queue->maxpriority];
else
q = q - 1;
if (!list_empty(q)) {
task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
goto new_queue;
}
} while (q != &queue->tasks[queue->priority]);
rpc_reset_waitqueue_priority(queue);
return NULL;
new_queue:
rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
new_owner:
rpc_set_waitqueue_owner(queue, task->tk_owner);
out:
rpc_wake_up_task_queue_locked(queue, task);
return task;
}
/*
* Wake up the next task on the wait queue.
*/
struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
{
struct rpc_task *task = NULL;
dprintk("RPC: wake_up_next(%p \"%s\")\n",
queue, rpc_qname(queue));
spin_lock_bh(&queue->lock);
if (RPC_IS_PRIORITY(queue))
task = __rpc_wake_up_next_priority(queue);
else {
task_for_first(task, &queue->tasks[0])
rpc_wake_up_task_queue_locked(queue, task);
}
spin_unlock_bh(&queue->lock);
return task;
}
EXPORT_SYMBOL_GPL(rpc_wake_up_next);
/**
* rpc_wake_up - wake up all rpc_tasks
* @queue: rpc_wait_queue on which the tasks are sleeping
*
* Grabs queue->lock
*/
void rpc_wake_up(struct rpc_wait_queue *queue)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list)
rpc_wake_up_task_queue_locked(queue, task);
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up);
/**
* rpc_wake_up_status - wake up all rpc_tasks and set their status value.
* @queue: rpc_wait_queue on which the tasks are sleeping
* @status: status value to set
*
* Grabs queue->lock
*/
void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
{
struct rpc_task *task, *next;
struct list_head *head;
spin_lock_bh(&queue->lock);
head = &queue->tasks[queue->maxpriority];
for (;;) {
list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
task->tk_status = status;
rpc_wake_up_task_queue_locked(queue, task);
}
if (head == &queue->tasks[0])
break;
head--;
}
spin_unlock_bh(&queue->lock);
}
EXPORT_SYMBOL_GPL(rpc_wake_up_status);
static void __rpc_queue_timer_fn(unsigned long ptr)
{
struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
struct rpc_task *task, *n;
unsigned long expires, now, timeo;
spin_lock(&queue->lock);
expires = now = jiffies;
list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
timeo = task->u.tk_wait.expires;
if (time_after_eq(now, timeo)) {
dprintk("RPC: %5u timeout\n", task->tk_pid);
task->tk_status = -ETIMEDOUT;
rpc_wake_up_task_queue_locked(queue, task);
continue;
}
if (expires == now || time_after(expires, timeo))
expires = timeo;
}
if (!list_empty(&queue->timer_list.list))
rpc_set_queue_timer(queue, expires);
spin_unlock(&queue->lock);
}
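/*
* Callback for rpc_delay(): the queue timer marked the task -ETIMEDOUT
* in order to wake it; clear that status so the expired delay is not
* treated as an error.
*/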
static void __rpc_atrun(struct rpc_task *task)
{
task->tk_status = 0;
}
/*
* Run a task at a later time
*/
void rpc_delay(struct rpc_task *task, unsigned long delay)
{
task->tk_timeout = delay;
rpc_sleep_on(&delay_queue, task, __rpc_atrun);
}
EXPORT_SYMBOL_GPL(rpc_delay);
/*
* Helper to call task->tk_ops->rpc_call_prepare
*/
void rpc_prepare_task(struct rpc_task *task)
{
task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
}
/*
* Helper that calls task->tk_ops->rpc_call_done if it exists
*/
void rpc_exit_task(struct rpc_task *task)
{
task->tk_action = NULL;
if (task->tk_ops->rpc_call_done != NULL) {
task->tk_ops->rpc_call_done(task, task->tk_calldata);
if (task->tk_action != NULL) {
WARN_ON(RPC_ASSASSINATED(task));
/* Always release the RPC slot and buffer memory */
xprt_release(task);
}
}
}
void rpc_exit(struct rpc_task *task, int status)
{
task->tk_status = status;
task->tk_action = rpc_exit_task;
if (RPC_IS_QUEUED(task))
rpc_wake_up_queued_task(task->tk_waitqueue, task);
}
EXPORT_SYMBOL_GPL(rpc_exit);
void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
{
if (ops->rpc_release != NULL)
ops->rpc_release(calldata);
}
/*
* This is the RPC `scheduler' (or rather, the finite state machine).
*/
static void __rpc_execute(struct rpc_task *task)
{
struct rpc_wait_queue *queue;
int task_is_async = RPC_IS_ASYNC(task);
int status = 0;
dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
task->tk_pid, task->tk_flags);
BUG_ON(RPC_IS_QUEUED(task));
for (;;) {
/*
* Execute any pending callback.
*/
if (task->tk_callback) {
void (*save_callback)(struct rpc_task *);
/*
* We set tk_callback to NULL before calling it,
* in case it sets the tk_callback field itself:
*/
save_callback = task->tk_callback;
task->tk_callback = NULL;
save_callback(task);
} else {
/*
* Perform the next FSM step.
* tk_action may be NULL when the task has been killed
* by someone else.
*/
if (task->tk_action == NULL)
break;
task->tk_action(task);
}
/*
* Lockless check for whether task is sleeping or not.
*/
if (!RPC_IS_QUEUED(task))
continue;
/*
* The queue->lock protects against races with
* rpc_make_runnable().
*
* Note that once we clear RPC_TASK_RUNNING on an asynchronous
* rpc_task, rpc_make_runnable() can assign it to a
* different workqueue. We therefore cannot assume that the
* rpc_task pointer is still safe to dereference afterwards.
*/
queue = task->tk_waitqueue;
spin_lock_bh(&queue->lock);
if (!RPC_IS_QUEUED(task)) {
spin_unlock_bh(&queue->lock);
continue;
}
rpc_clear_running(task);
spin_unlock_bh(&queue->lock);
if (task_is_async)
return;
/* sync task: sleep here */
dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
status = out_of_line_wait_on_bit(&task->tk_runstate,
RPC_TASK_QUEUED, rpc_wait_bit_killable,
TASK_KILLABLE);
if (status == -ERESTARTSYS) {
/*
* When a sync task receives a signal, it exits with
* -ERESTARTSYS. In order to catch any callbacks that
* clean up after sleeping on some queue, we don't
* break the loop here, but go around once more.
*/
dprintk("RPC: %5u got signal\n", task->tk_pid);
task->tk_flags |= RPC_TASK_KILLED;
rpc_exit(task, -ERESTARTSYS);
}
rpc_set_running(task);
dprintk("RPC: %5u sync task resuming\n", task->tk_pid);
}
dprintk("RPC: %5u return %d, status %d\n", task->tk_pid, status,
task->tk_status);
/* Release all resources associated with the task */
rpc_release_task(task);
}
/*
* User-visible entry point to the scheduler.
*
* This may be called recursively if e.g. an async NFS task updates
* the attributes and finds that dirty pages must be flushed.
* NOTE: Upon exit of this function the task is guaranteed to be
* released. In particular note that tk_release() will have
* been called, so your task memory may have been freed.
*/
void rpc_execute(struct rpc_task *task)
{
rpc_set_active(task);
rpc_make_runnable(task);
if (!RPC_IS_ASYNC(task))
__rpc_execute(task);
}
static void rpc_async_schedule(struct work_struct *work)
{
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
}
/**
* rpc_malloc - allocate an RPC buffer
* @task: RPC task that will use this buffer
* @size: requested byte size
*
* To prevent rpciod from hanging, this allocator never sleeps,
* returning NULL if the request cannot be serviced immediately.
* The caller can arrange to sleep in a way that is safe for rpciod.
*
* Most requests are 'small' (under 2KiB) and can be serviced from a
* mempool, ensuring that NFS reads and writes can always proceed,
* and that there is good locality of reference for these buffers.
*
* In order to avoid memory starvation triggering more writebacks of
* NFS requests, we avoid using GFP_KERNEL.
*/
void *rpc_malloc(struct rpc_task *task, size_t size)
{
struct rpc_buffer *buf;
gfp_t gfp = RPC_IS_SWAPPER(task) ? GFP_ATOMIC : GFP_NOWAIT;
size += sizeof(struct rpc_buffer);
if (size <= RPC_BUFFER_MAXSIZE)
buf = mempool_alloc(rpc_buffer_mempool, gfp);
else
buf = kmalloc(size, gfp);
if (!buf)
return NULL;
buf->len = size;
dprintk("RPC: %5u allocated buffer of size %zu at %p\n",
task->tk_pid, size, buf);
return &buf->data;
}
EXPORT_SYMBOL_GPL(rpc_malloc);
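/*
 * Usage sketch (added commentary, not part of the original source): the
 * pointer returned by rpc_malloc() is the data area of a hidden
 * struct rpc_buffer header, so it must be released with rpc_free(),
 * never kfree(). The helper name is hypothetical.
 */
#if 0
static int example_get_buffer(struct rpc_task *task, size_t len)
{
	void *buf = rpc_malloc(task, len);	/* never sleeps */
	if (buf == NULL)
		return -ENOMEM;		/* caller may retry later */
	/* ... fill buf with call/reply data ... */
	rpc_free(buf);
	return 0;
}
#endif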
/**
* rpc_free - free buffer allocated via rpc_malloc
* @buffer: buffer to free
*
*/
void rpc_free(void *buffer)
{
size_t size;
struct rpc_buffer *buf;
if (!buffer)
return;
buf = container_of(buffer, struct rpc_buffer, data);
size = buf->len;
dprintk("RPC: freeing buffer of size %zu at %p\n",
size, buf);
if (size <= RPC_BUFFER_MAXSIZE)
mempool_free(buf, rpc_buffer_mempool);
else
kfree(buf);
}
EXPORT_SYMBOL_GPL(rpc_free);
/*
* Creation and deletion of RPC task structures
*/
static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
{
memset(task, 0, sizeof(*task));
atomic_set(&task->tk_count, 1);
task->tk_flags = task_setup_data->flags;
task->tk_ops = task_setup_data->callback_ops;
task->tk_calldata = task_setup_data->callback_data;
INIT_LIST_HEAD(&task->tk_task);
/* Initialize retry counters */
task->tk_garb_retry = 2;
task->tk_cred_retry = 2;
task->tk_rebind_retry = 2;
task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
task->tk_owner = current->tgid;
/* Initialize workqueue for async tasks */
task->tk_workqueue = task_setup_data->workqueue;
if (task->tk_ops->rpc_call_prepare != NULL)
task->tk_action = rpc_prepare_task;
/* starting timestamp */
task->tk_start = ktime_get();
dprintk("RPC: new task initialized, procpid %u\n",
task_pid_nr(current));
}
static struct rpc_task *
rpc_alloc_task(void)
{
return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
}
/*
* Create a new task for the specified client.
*/
struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
{
struct rpc_task *task = setup_data->task;
unsigned short flags = 0;
if (task == NULL) {
task = rpc_alloc_task();
if (task == NULL) {
rpc_release_calldata(setup_data->callback_ops,
setup_data->callback_data);
return ERR_PTR(-ENOMEM);
}
flags = RPC_TASK_DYNAMIC;
}
rpc_init_task(task, setup_data);
task->tk_flags |= flags;
dprintk("RPC: allocated task %p\n", task);
return task;
}
static void rpc_free_task(struct rpc_task *task)
{
const struct rpc_call_ops *tk_ops = task->tk_ops;
void *calldata = task->tk_calldata;
if (task->tk_flags & RPC_TASK_DYNAMIC) {
dprintk("RPC: %5u freeing task\n", task->tk_pid);
mempool_free(task, rpc_task_mempool);
}
rpc_release_calldata(tk_ops, calldata);
}
static void rpc_async_release(struct work_struct *work)
{
rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
}
static void rpc_release_resources_task(struct rpc_task *task)
{
if (task->tk_rqstp)
xprt_release(task);
if (task->tk_msg.rpc_cred) {
put_rpccred(task->tk_msg.rpc_cred);
task->tk_msg.rpc_cred = NULL;
}
rpc_task_release_client(task);
}
static void rpc_final_put_task(struct rpc_task *task,
struct workqueue_struct *q)
{
if (q != NULL) {
INIT_WORK(&task->u.tk_work, rpc_async_release);
queue_work(q, &task->u.tk_work);
} else
rpc_free_task(task);
}
static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
{
if (atomic_dec_and_test(&task->tk_count)) {
rpc_release_resources_task(task);
rpc_final_put_task(task, q);
}
}
void rpc_put_task(struct rpc_task *task)
{
rpc_do_put_task(task, NULL);
}
EXPORT_SYMBOL_GPL(rpc_put_task);
void rpc_put_task_async(struct rpc_task *task)
{
rpc_do_put_task(task, task->tk_workqueue);
}
EXPORT_SYMBOL_GPL(rpc_put_task_async);
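/*
 * Added commentary: rpc_put_task() performs the final rpc_free_task()
 * synchronously in the caller's context, whereas rpc_put_task_async()
 * defers it to the task's workqueue via rpc_async_release() (see
 * rpc_final_put_task() above). The async variant appears intended for
 * callers that must not block while dropping their reference.
 */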
static void rpc_release_task(struct rpc_task *task)
{
dprintk("RPC: %5u release task\n", task->tk_pid);
BUG_ON (RPC_IS_QUEUED(task));
rpc_release_resources_task(task);
/*
* Note: at this point we have been removed from rpc_clnt->cl_tasks,
* so it should be safe to use task->tk_count as a test for whether
* or not any other processes still hold references to our rpc_task.
*/
if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
/* Wake up anyone who may be waiting for task completion */
if (!rpc_complete_task(task))
return;
} else {
if (!atomic_dec_and_test(&task->tk_count))
return;
}
rpc_final_put_task(task, task->tk_workqueue);
}
int rpciod_up(void)
{
return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
}
void rpciod_down(void)
{
module_put(THIS_MODULE);
}
/*
* Start up the rpciod workqueue.
*/
static int rpciod_start(void)
{
struct workqueue_struct *wq;
/*
	 * Create the rpciod workqueue.
*/
dprintk("RPC: creating workqueue rpciod\n");
wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM, 0);
rpciod_workqueue = wq;
return rpciod_workqueue != NULL;
}
static void rpciod_stop(void)
{
struct workqueue_struct *wq = NULL;
if (rpciod_workqueue == NULL)
return;
dprintk("RPC: destroying workqueue rpciod\n");
wq = rpciod_workqueue;
rpciod_workqueue = NULL;
destroy_workqueue(wq);
}
void
rpc_destroy_mempool(void)
{
rpciod_stop();
if (rpc_buffer_mempool)
mempool_destroy(rpc_buffer_mempool);
if (rpc_task_mempool)
mempool_destroy(rpc_task_mempool);
if (rpc_task_slabp)
kmem_cache_destroy(rpc_task_slabp);
if (rpc_buffer_slabp)
kmem_cache_destroy(rpc_buffer_slabp);
rpc_destroy_wait_queue(&delay_queue);
}
int
rpc_init_mempool(void)
{
/*
* The following is not strictly a mempool initialisation,
* but there is no harm in doing it here
*/
rpc_init_wait_queue(&delay_queue, "delayq");
if (!rpciod_start())
goto err_nomem;
rpc_task_slabp = kmem_cache_create("rpc_tasks",
sizeof(struct rpc_task),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_task_slabp)
goto err_nomem;
rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
RPC_BUFFER_MAXSIZE,
0, SLAB_HWCACHE_ALIGN,
NULL);
if (!rpc_buffer_slabp)
goto err_nomem;
rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
rpc_task_slabp);
if (!rpc_task_mempool)
goto err_nomem;
rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
rpc_buffer_slabp);
if (!rpc_buffer_mempool)
goto err_nomem;
return 0;
err_nomem:
rpc_destroy_mempool();
return -ENOMEM;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3471_3 |
crossvul-cpp_data_good_944_1 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% OOO PPPP EEEE RRRR AA TTTTT III OOO N N %
% O O P P E R R A A T I O O NN N %
% O O PPPP EEE RRRR AAAA T I O O N N N %
% O O P E R R A A T I O O N NN %
% OOO P EEEE R RR A A T III OOO N N %
% %
% %
% CLI Magick Option Methods %
% %
% Dragon Computing %
% Anthony Thyssen %
% September 2011 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Apply the given options (settings, and simple, or sequence operations) to
% the given image(s) according to the current "image_info", "draw_info", and
% "quantize_info" settings, stored in a special CLI Image Wand.
%
% The final goal is to allow execution in a strict one-option-at-a-time
% manner, as is needed for 'pipelining and file scripting' of options in
% IMv7.
%
% Anthony Thyssen, September 2011
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/mogrify.h"
#include "MagickWand/operation.h"
#include "MagickWand/wand.h"
#include "MagickWand/wandcli.h"
#include "MagickWand/wandcli-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer-private.h"
/*
Constant declaration.
*/
static const char
MogrifyAlphaColor[] = "#bdbdbd", /* slightly darker gray */
MogrifyBackgroundColor[] = "#fff", /* white */
MogrifyBorderColor[] = "#dfdfdf"; /* sRGB gray */
/*
Define declarations.
*/
#define USE_WAND_METHODS 1
#define MAX_STACK_DEPTH 32
#define UNDEFINED_COMPRESSION_QUALITY 0UL
/* FUTURE: why is this default so specific? */
#define DEFAULT_DISSIMILARITY_THRESHOLD "0.31830988618379067154"
/* For Debugging Geometry Input */
#define ReportGeometry(flags,info) \
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", \
flags, info.rho, info.sigma, info.xi, info.psi )
/*
** Function to report on the progress of image operations
*/
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MagickPathExtent],
tag[MagickPathExtent];
const char
*locale_message;
register char
*p;
magick_unreferenced(client_data);
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MagickPathExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
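/*
** Registration sketch (added, illustrative only): MonitorProgress() is
** normally installed by the "-monitor" setting handled further below; a
** hypothetical direct registration would look like this.
*/
#if 0
  {
    ImageInfo
      *image_info;
    image_info=AcquireImageInfo();
    (void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
      (void *) NULL);
    /* subsequent reads/operations now report progress on stderr */
    image_info=DestroyImageInfo(image_info);
  }
#endif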
/*
** GetImageCache() will read an image into an image cache if not already
** present, then return the image that is in the cache under that filename.
*/
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MagickPathExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
if (path != (const char *) NULL)
(void) CopyMagickString(read_info->filename,path,MagickPathExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
/*
  SparseColorOption() parses the complex -sparse-color argument into an
  array of floating point values, then calls SparseColorImage().
  Argument is a complex mix of floating-point pixel coordinates and color
  specifications (or direct floating point numbers).  The number of floats
  needed to represent a color varies depending on the current channel
  setting.
  This really should be in MagickCore, so that other APIs can make use of it.
*/
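/*
  Worked example (added commentary): for a plain RGB image number_colors
  is 3, so each control point consumes 2+3 floats.  The argument
  "30,10 red  10,80 blue" therefore parses to number_arguments = 10;
  supplying the colors as raw floats, "30,10 1,0,0  10,80 0,0,1",
  produces the same array.
*/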
static Image *SparseColorOption(const Image *image,
const SparseColorMethod method,const char *arguments,ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
PixelInfo
color;
MagickBooleanType
error;
register size_t
x;
size_t
number_arguments,
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Limit channels according to image
add up number of values needed per color.
*/
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
number_colors++;
/*
    Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' )
x += number_colors; /* color argument found */
else
x++; /* floating point argument */
}
/* control points and color values */
if ((x % (2+number_colors)) != 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","'%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
error=MagickFalse;
number_arguments=x;
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of X-coord");
error=MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error=MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color name or function given in string argument */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryColorCompliance(token,AllCompliance,&color,
exception);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.blue;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
sparse_arguments[x++] = QuantumScale*color.black;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
sparse_arguments[x++] = QuantumScale*color.alpha;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
if (error != MagickFalse)
{
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return((Image *) NULL);
}
if (number_arguments != x)
{
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error");
return((Image *) NULL);
}
/* Call the Sparse Color Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments,
exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L I S e t t i n g O p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLISettingOptionInfo() applies a single settings option into a CLI wand
% holding the image_info, draw_info, quantize_info structures that will be
% used when processing the images.
%
%  These options do not require images to be present in the CLI wand in
%  order to be set; in that case they will generally be applied to images
%  that are read in later.
%
%  Options handled by this function are listed in CommandOptions[] of
%  "option.c" and are flagged with the "SettingOptionFlags" option flags.
%
% The format of the CLISettingOptionInfo method is:
%
% void CLISettingOptionInfo(MagickCLI *cli_wand,
% const char *option, const char *arg1, const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings to be applied
%
% o option: The option string to be set
%
% o arg1, arg2: optional argument strings to the operation
% arg2 is currently only used by "-limit"
%
*/
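/*
  Illustrative call (added, hypothetical): applying "-gravity center"
  would look like
    CLISettingOptionInfo(cli_wand,"-gravity","center",(const char *) NULL);
  while passing "+gravity" instead resets the setting to its default,
  since IfSetOption below keys off the leading '-'.
*/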
WandPrivate void CLISettingOptionInfo(MagickCLI *cli_wand,
const char *option,const char *arg1n, const char *arg2n)
{
ssize_t
parse; /* option argument parsing (string to value table lookup) */
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _image (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define IfSetOption (*option=='-')
#define ArgBoolean IfSetOption ? MagickTrue : MagickFalse
#define ArgBooleanNot IfSetOption ? MagickFalse : MagickTrue
#define ArgBooleanString (IfSetOption?"true":"false")
#define ArgOption(def) (IfSetOption?arg1:(const char *)(def))
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- Setting Option: %s \"%s\" \"%s\"", option,arg1n,arg2n);
arg1 = arg1n,
arg2 = arg2n;
#if 1
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
#endif
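  /* Example (added commentary): with interpretation enabled, an argument
     such as "%wx%h" is expanded against the first image in the wand by
     InterpretImageProperties() before the option is applied. */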
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
_image_info->adjoin = ArgBoolean;
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
CLIWandWarnReplaced("-draw 'affine ...'");
if (IfSetOption)
(void) ParseAffineGeometry(arg1,&_draw_info->affine,_exception);
else
GetAffineMatrix(&_draw_info->affine);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
_image_info->antialias =
_draw_info->stroke_antialias =
_draw_info->text_antialias = ArgBoolean;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption("1.0"));
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
_image_info only used directly for generating new images.
SyncImageSettings() used to set per-image attribute.
FUTURE: if _image_info->background_color is not set then
we should fall back to per-image background_color
At this time -background will 'wipe out' the per-image
background color!
Better error handling of QueryColorCompliance() needed.
*/
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption(MogrifyBackgroundColor),AllCompliance,
&_image_info->background_color,_exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
/* FUTURE: bias OBSOLETED, replaced by Artifact "convolve:bias"
             as it is actually rarely used except in direct convolve
             operations.  Usage outside a direct convolve operation is
             actually nonsensical!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,"convolve:bias",ArgOption(NULL));
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
/* Used as a image chromaticity setting
SyncImageSettings() used to set per-image attribute.
*/
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
Used by many coders including PNG
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
SyncImageSettings() used to set per-image attribute.
Better error checking of QueryColorCompliance().
*/
if (IfSetOption)
{
(void) SetImageOption(_image_info,option+1,arg1);
(void) QueryColorCompliance(arg1,AllCompliance,
&_image_info->border_color,_exception);
(void) QueryColorCompliance(arg1,AllCompliance,
&_draw_info->border_color,_exception);
break;
}
(void) DeleteImageOption(_image_info,option+1);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&_image_info->border_color,_exception);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&_draw_info->border_color,_exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
CLIWandWarnReplaced("-undercolor");
CLISettingOptionInfo(cli_wand,"-undercolor",arg1, arg2);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",arg1) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(arg1,100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
          /* Setting used for new images via AcquireImage().
             But also used as a SimpleImageOperator.
             Undefined colorspace means don't modify images on
             read or as an operation. */
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
_image_info->colorspace=(ColorspaceType) parse;
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
/* FUTURE: _image_info should be used,
SyncImageSettings() used to set per-image attribute. - REMOVE
This setting should NOT be used to set image 'compose'
"-layer" operators shoud use _image_info if defined otherwise
they should use a per-image compose setting.
*/
parse = ParseCommandOption(MagickComposeOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedComposeOperator",
option,arg1);
_image_info->compose=(CompositeOperator) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
/* FUTURE: What should be used? _image_info or ImageOption ???
             The former is more efficient, but Cristy prefers the latter!
             SyncImageSettings() used to set per-image attribute.
             The coders appear to use _image_info, not Image_Option;
however the image attribute (for save) is set from the
ImageOption!
Note that "undefined" is a different setting to "none".
*/
parse = ParseCommandOption(MagickCompressOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageCompression",
option,arg1);
_image_info->compression=(CompressionType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1=ArgOption("none");
parse = ParseCommandOption(MagickLogEventOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEventType",
option,arg1);
(void) SetLogEventMask(arg1);
_image_info->debug=IsEventLogging(); /* extract logging*/
cli_wand->wand.debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (LocaleNCompare(arg1,"registry:",9) == 0)
{
if (IfSetOption)
(void) DefineImageRegistry(StringRegistryType,arg1+9,_exception);
else
(void) DeleteImageRegistry(arg1+9);
break;
}
/* DefineImageOption() equals SetImageOption() but with '=' */
if (IfSetOption)
(void) DefineImageOption(_image_info,arg1);
else if (DeleteImageOption(_image_info,arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"NoSuchOption",option,arg1);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
/* Only used for new images via AcquireImage()
FUTURE: Option should also be used for "-morph" (color morphing)
*/
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/* FUTURE: strings used in _image_info attr and _draw_info!
Basically as density can be in a XxY form!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) CloneString(&_image_info->density,ArgOption(NULL));
(void) CloneString(&_draw_info->density,_image_info->density);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
          /* This is also a SimpleImageOperator! for 8->16 value truncation !!!!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->depth=IfSetOption?StringToUnsignedLong(arg1)
:MAGICKCORE_QUANTUM_DEPTH;
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
/* Image Option is only used to set _draw_info */
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickDirectionOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedDirectionType",
option,arg1);
_draw_info->direction=(DirectionType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&_image_info->server_name,ArgOption(NULL));
(void) CloneString(&_draw_info->server_name,_image_info->server_name);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
/* only used in setting new images */
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickDisposeOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedDisposeMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption("undefined"));
break;
}
if (LocaleCompare("dissimilarity-threshold",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
arg1=ArgOption(DEFAULT_DISSIMILARITY_THRESHOLD);
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
/* _image_info attr (on/off), _quantize_info attr (on/off)
but also ImageInfo and _quantize_info method!
FUTURE: merge the duality of the dithering options
*/
_image_info->dither = ArgBoolean;
(void) SetImageOption(_image_info,option+1,ArgOption("none"));
_quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,ArgOption("none"));
if (_quantize_info->dither_method == NoDitherMethod)
_image_info->dither = MagickFalse;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&_draw_info->encoding,ArgOption("undefined"));
(void) SetImageOption(_image_info,option+1,_draw_info->encoding);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
/* Both _image_info attr and ImageInfo */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickEndianOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEndianType",
option,arg1);
          /* FUTURE: check alloc/free of endian string! - remove? */
          /* use the parsed enum value, not the string's first character */
          _image_info->endian=(EndianType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
(void) CloneString(&_image_info->extract,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
(void) CloneString(&_draw_info->family,ArgOption(NULL));
break;
}
if (LocaleCompare("features",option+1) == 0)
{
(void) SetImageOption(_image_info,"identify:features",
ArgBooleanString);
if (IfSetOption)
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
/* Set "fill" OR "fill-pattern" in _draw_info
The original fill color is preserved if a fill-pattern is given.
             That way it does not affect other operations that directly use
             the fill color, and it can be restored using "+tile".
*/
MagickBooleanType
status;
ExceptionInfo
*sans;
PixelInfo
color;
arg1 = ArgOption("none"); /* +fill turns it off! */
(void) SetImageOption(_image_info,option+1,arg1);
if (_draw_info->fill_pattern != (Image *) NULL)
_draw_info->fill_pattern=DestroyImage(_draw_info->fill_pattern);
/* is it a color or a image? -- ignore exceptions */
sans=AcquireExceptionInfo();
status=QueryColorCompliance(arg1,AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
_draw_info->fill_pattern=GetImageCache(_image_info,arg1,_exception);
else
_draw_info->fill=color;
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickFilterOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageFilter",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
(void) CloneString(&_draw_info->font,ArgOption(NULL));
(void) CloneString(&_image_info->font,_draw_info->font);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
/* FUTURE: why the ping test, you could set ping after this! */
/*
register const char
*q;
for (q=strchr(arg1,'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
_image_info->ping=MagickFalse;
*/
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
/* Option used to set image fuzz! unless blank canvas (from color)
Image attribute used for color compare operations
SyncImageSettings() used to set per-image attribute.
FUTURE: Can't find anything else using _image_info->fuzz directly!
convert structure attribute to 'option' string
*/
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->fuzz=StringToDoubleInterval(arg1,(double)
QuantumRange+1.0);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("none");
parse = ParseCommandOption(MagickGravityOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedGravityType",
option,arg1);
_draw_info->gravity=(GravityType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
SyncImageSettings() used to set per-image attribute.
Used directly by many coders
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,"compare:highlight-color",
ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityType",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
/* Only used by coders: MIFF, MPC, BMP, PNG
and for image profile call to AcquireTransformThreadSet()
SyncImageSettings() used to set per-image attribute.
*/
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickIntentOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntentType",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
/* _image_info is directly used by coders (so why an image setting?)
SyncImageSettings() used to set per-image attribute.
*/
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickInterlaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedInterlaceType",
option,arg1);
_image_info->interlace=(InterlaceType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
_draw_info->interline_spacing=StringToDouble(ArgOption("0"),
(char **) NULL);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickInterpolateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedInterpolateMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
_draw_info->interword_spacing=StringToDouble(ArgOption("0"),(char **) NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_draw_info->kerning=StringToDouble(ArgOption("0"),(char **) NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
/* only used for new images - not in SyncImageOptions() */
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
parse= ParseCommandOption(MagickResourceOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedResourceType",
option,arg1);
if (LocaleCompare("unlimited",arg2) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(arg2,100.0);
(void) SetMagickResourceLimit((ResourceType)parse,limit);
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (IfSetOption) {
if ((strchr(arg1,'%') == (char *) NULL))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetLogFormat(arg1);
}
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,"compare:lowlight-color",
ArgOption(NULL));
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("mattecolor",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption(MogrifyAlphaColor),
AllCompliance,&_image_info->matte_color,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
parse=ParseCommandOption(MagickMetricOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedMetricType",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
(void) SetImageOption(_image_info,"identify:moments",
ArgBooleanString);
if (IfSetOption)
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(_image_info, IfSetOption?
MonitorProgress: (MagickProgressMonitor) NULL, (void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
/* Setting (used by some input coders!) -- why?
             Warning: This is also the special '-type' SimpleOperator.
*/
_image_info->monochrome= ArgBoolean;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
          /* Is not used when defining new images.
             This makes it more of an 'operation' than a setting.
FUTURE: make set meta-data operator instead.
SyncImageSettings() used to set per-image attribute.
*/
parse=ParseCommandOption(MagickOrientationOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageOrientation",
option,arg1);
_image_info->orientation=(OrientationType)parse;
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
/* Only used for new images and image generators.
SyncImageSettings() used to set per-image attribute. ?????
That last is WRONG!!!!
             FUTURE: adjust named 'page' sizes according to density
*/
char
*canonical_page,
page[MagickPathExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (!IfSetOption)
{
(void) DeleteImageOption(_image_info,option+1);
(void) CloneString(&_image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(_image_info,"page");
if (image_option != (const char *) NULL)
flags=ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(arg1);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(_image_info,option+1,page);
(void) CloneString(&_image_info->page,page);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
_image_info->ping = ArgBoolean;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (IfSetOption) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->pointsize =
_draw_info->pointsize =
StringToDouble(arg1,(char **) NULL);
}
else {
_image_info->pointsize=0.0; /* unset pointsize */
_draw_info->pointsize=12.0;
}
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
arg1=ArgOption("-1");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetMagickPrecision(StringToInteger(arg1));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->quality= IfSetOption ? StringToUnsignedLong(arg1)
: UNDEFINED_COMPRESSION_QUALITY;
(void) SetImageOption(_image_info,option+1,ArgOption("0"));
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
/* Just a set direct in _quantize_info */
arg1=ArgOption("undefined");
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",
option,arg1);
_quantize_info->colorspace=(ColorspaceType)parse;
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
          /* FUTURE: if -quiet is given twice you cannot do +quiet!
             This needs to be checked over thoroughly.
*/
static WarningHandler
warning_handler = (WarningHandler) NULL;
WarningHandler
tmp = SetWarningHandler((WarningHandler) NULL);
if ( tmp != (WarningHandler) NULL)
warning_handler = tmp; /* remember the old handler */
if (!IfSetOption) /* set the old handler */
warning_handler=SetWarningHandler(warning_handler);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
Used by many coders
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("regard-warnings",option+1) == 0)
/* FUTURE: to be replaced by a 'fatal-level' type setting */
break;
if (LocaleCompare("render",option+1) == 0)
{
/* _draw_info only setting */
_draw_info->render= ArgBooleanNot;
break;
}
if (LocaleCompare("respect-parenthesis",option+1) == 0)
{
/* link image and setting stacks - option is itself saved on stack! */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/* FUTURE: should be converted to jpeg:sampling_factor */
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) CloneString(&_image_info->sampling_factor,ArgOption(NULL));
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/* SyncImageSettings() used to set this as a per-image attribute.
What ??? Why ????
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_image_info->scene=StringToUnsignedLong(ArgOption("0"));
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
SetRandomSecretKey(
IfSetOption ? (unsigned long) StringToUnsignedLong(arg1)
: (unsigned long) time((time_t *) NULL));
break;
}
if (LocaleCompare("size",option+1) == 0)
{
/* FUTURE: string in _image_info -- convert to Option ???
Look at the special handling for "size" in SetImageOption()
*/
(void) CloneString(&_image_info->size,ArgOption(NULL));
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickStretchOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedStretchType",
option,arg1);
_draw_info->stretch=(StretchType) parse;
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
/* set stroke color OR stroke-pattern
             UPDATE: ensure the stroke color is not destroyed if a pattern
             is given, just in case the color is also used for other purposes.
*/
MagickBooleanType
status;
ExceptionInfo
*sans;
PixelInfo
color;
arg1 = ArgOption("none"); /* +fill turns it off! */
(void) SetImageOption(_image_info,option+1,arg1);
if (_draw_info->stroke_pattern != (Image *) NULL)
_draw_info->stroke_pattern=DestroyImage(_draw_info->stroke_pattern);
/* is it a color or a image? -- ignore exceptions */
sans=AcquireExceptionInfo();
status=QueryColorCompliance(arg1,AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
_draw_info->stroke_pattern=GetImageCache(_image_info,arg1,_exception);
else
_draw_info->stroke=color;
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_draw_info->stroke_width=StringToDouble(ArgOption("1.0"),
(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickStyleOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedStyleType",
option,arg1);
_draw_info->style=(StyleType) parse;
break;
}
#if 0
if (LocaleCompare("subimage-search",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
#endif
if (LocaleCompare("synchronize",option+1) == 0)
{
          /* FUTURE: synchronize to storage - but what does that mean? */
_image_info->synchronize = ArgBoolean;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
/* FUTURE: move _image_info string to option splay-tree
Other than "montage" what uses "texture" ????
*/
(void) CloneString(&_image_info->texture,ArgOption(NULL));
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
_draw_info->fill_pattern=IfSetOption
?GetImageCache(_image_info,arg1,_exception)
:DestroyImage(_draw_info->fill_pattern);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. ??? */
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
_image_info only used for generating new images.
SyncImageSettings() used to set per-image attribute.
Note that +transparent-color, means fall-back to image
attribute so ImageOption is deleted, not set to a default.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption("none"),AllCompliance,
&_image_info->transparent_color,_exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_quantize_info->tree_depth=StringToUnsignedLong(ArgOption("0"));
break;
}
if (LocaleCompare("type",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
parse=ParseCommandOption(MagickTypeOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageType",
option,arg1);
_image_info->type=(ImageType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption("none"),AllCompliance,
&_draw_info->undercolor,_exception);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute.
             Should this affect _draw_info X and Y resolution?
FUTURE: this probably should be part of the density setting
*/
parse=ParseCommandOption(MagickResolutionOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedUnitsType",
option,arg1);
_image_info->units=(ResolutionType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
          /* FUTURE: Remember all options become image artifacts;
             _image_info->verbose is only used by coders.
*/
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
_image_info->verbose= ArgBoolean;
_image_info->ping=MagickFalse; /* verbose can't be a ping */
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute.
This is VERY deep in the image caching structure.
*/
parse=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedVirtualPixelMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,arg1);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(arg1);
_draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
/* Used as a image chromaticity setting
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if ((arg1 && arg1n) && (arg1 != arg1n ))
arg1=DestroyString((char *) arg1);
if ((arg2 && arg2n) && (arg2 != arg2n ))
arg2=DestroyString((char *) arg2);
#undef _image_info
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfSetOption
#undef ArgBoolean
#undef ArgBooleanNot
#undef ArgBooleanString
#undef ArgOption
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I S i m p l e O p e r a t o r I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CLISimpleOperatorImages() applies one simple image operation given to all
%  the images in the CLI wand, using any per-image or global settings that were
% previously saved in the CLI wand.
%
% It is assumed that any such settings are up-to-date.
%
%  The format of the CLISimpleOperatorImages method is:
%
% MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,const char *option,
% const char *arg1, const char *arg2,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings and images to be operated on
%
% o option: The option string for the operation
%
% o arg1, arg2: optional argument strings to the operation
%
*/
/*
  CLISimpleOperatorImage() is an internal subroutine to apply one simple
  image operation to the current image pointed to by the CLI wand.
  The image in the list may be modified in three different ways...
    * directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
    * replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
    * one image replaced by a list of images (-separate and -crop only!)
  In each case the result replaces the single original image in the list,
  and a pointer to the modified image (the last image added, if replaced by
  a list of images) is returned.
  As the image pointed to may be replaced, the first image in the list may
  also change.  GetFirstImageInList() should be used by the caller if they
  wish to obtain the Image pointer to the first image in the list.
*/
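/*
  Illustrative calls (added, hypothetical): a direct modification such as
  "-negate" takes no arguments,
    CLISimpleOperatorImage(cli_wand,"-negate",(const char *) NULL,
      (const char *) NULL,exception);
  while an operator like "-resize" passes its geometry as arg1 and
  replaces the image via the new_image pointer set up below.
*/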
static MagickBooleanType CLISimpleOperatorImage(MagickCLI *cli_wand,
const char *option, const char *arg1n, const char *arg2n,
ExceptionInfo *exception)
{
Image *
new_image;
GeometryInfo
geometry_info;
RectangleInfo
geometry;
MagickStatusType
flags;
ssize_t
parse;
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _image (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse
#define IsPlusOp IfNormalOp ? MagickFalse : MagickTrue
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_image != (Image *) NULL); /* an image must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",cli_wand->wand.name);
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
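/* Illustrative example (an assumption, not tied to any one option): with
interpretation enabled, an argument such as "%wx%h" is expanded against
the current image, so a 640x480 image yields the string "640x480" before
the operator below ever sees it. */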
#undef _process_flags
#undef _option_type
#if 0
(void) FormatLocaleFile(stderr,
"CLISimpleOperatorImage: \"%s\" \"%s\" \"%s\"\n",option,arg1,arg2);
#endif
new_image = (Image *) NULL; /* the replacement image, if not null at end */
SetGeometryInfo(&geometry_info);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=AdaptiveBlurImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=AdaptiveResizeImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=AdaptiveSharpenImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
parse=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedAlphaChannelOption",
option,arg1);
(void) SetImageAlphaChannel(_image,(AlphaChannelOption) parse,
_exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
geometry[MagickPathExtent];
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(arg1,&geometry_info);
if (flags == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
(void) CloneString(&_draw_info->text,arg2);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&_draw_info->geometry,geometry);
_draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
_draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
_draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
_draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(_image,_draw_info,_exception);
GetAffineMatrix(&_draw_info->affine);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
{
(void) AutoGammaImage(_image,_exception);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
(void) AutoLevelImage(_image,_exception);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
new_image=AutoOrientImage(_image,_image->orientation,_exception);
break;
}
if (LocaleCompare("auto-threshold",option+1) == 0)
{
AutoThresholdMethod
method;
method=(AutoThresholdMethod) ParseCommandOption(
MagickAutoThresholdOptions,MagickFalse,arg1);
(void) AutoThresholdImage(_image,method,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) BlackThresholdImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
geometry_info.rho=1.5;
if (IfNormalOp) {
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
}
new_image=BlueShiftImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=BlurImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
if ((flags & (WidthValue | HeightValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
compose=OverCompositeOp;
value=GetImageOption(_image_info,"compose");
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
new_image=BorderImage(_image,&geometry,compose,_exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImage(_image,brightness,contrast,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=10;
if ((flags & PsiValue) == 0)
geometry_info.psi=30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
new_image=CannyEdgeImage(_image,geometry_info.rho,geometry_info.sigma,
geometry_info.xi,geometry_info.psi,_exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection; /* Note: arguments do not have percent escapes expanded */
/*
Color correct with a color decision list.
*/
color_correction_collection=FileToString(arg1,~0UL,_exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(_image,color_correction_collection,
_exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (IfPlusOp)
{
(void) SetPixelChannelMask(_image,DefaultChannels);
break;
}
parse=ParseChannelOption(arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedChannelType",option,
arg1);
(void) SetPixelChannelMask(_image,(ChannelType) parse);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
new_image=CharcoalImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseGravityGeometry(_image,arg1,&geometry,_exception);
new_image=ChopImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGeometry(arg1,&geometry_info);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
(void) CLAHEImage(_image,geometry.width,geometry.height,
(size_t) geometry.x,geometry_info.psi,_exception);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
{
(void) ClampImage(_image,_exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
if (IfNormalOp)
(void) ClipImage(_image,_exception);
else /* "+mask" remove the write mask */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
Image
*clip_mask;
if (IfPlusOp) {
/* use "+clip-mask" Remove the write mask for -clip-path */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,_exception);
break;
}
clip_mask=GetImageCache(_image_info,arg1,_exception);
if (clip_mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,clip_mask,_exception);
clip_mask=DestroyImage(clip_mask);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) ClipImagePath(_image,arg1,IsNormalOp,_exception);
/* Note: Use "+clip-mask" remove the write mask added */
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ColorizeImage(_image,arg1,&_draw_info->fill,_exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
kernel=AcquireKernelInfo(arg1,exception);
if (kernel == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ColorMatrixImage(_image,kernel,_exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/* Reduce the number of colors in the image.
FUTURE: also provide a 'plus' version using the image's color counts.
*/
_quantize_info->number_colors=StringToUnsignedLong(arg1);
if (_quantize_info->number_colors == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((_image->storage_class == DirectClass) ||
_image->colors > _quantize_info->number_colors)
(void) QuantizeImage(_quantize_info,_image,_exception);
else
(void) CompressImageColormap(_image,_exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
/* WARNING: this is both an image_info setting (already done)
and an operator to change the image colorspace.
FUTURE: default colorspace should be sRGB!
Unless some type of 'linear colorspace' mode is set.
Note that +colorspace sets "undefined" (no effect) on new
images, but forces images already in memory back to RGB!
That seems a little strange!
*/
(void) TransformImageColorspace(_image,
IfNormalOp ? _image_info->colorspace : sRGBColorspace,
_exception);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ConnectedComponentsImage(_image,(size_t)
StringToInteger(arg1),(CCObjectInfo **) NULL,_exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
CLIWandWarnReplaced(IfNormalOp?"-level":"+level");
(void) ContrastImage(_image,IsNormalOp,_exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0) {
black_point*=(double) _image->columns*_image->rows/100.0;
white_point*=(double) _image->columns*_image->rows/100.0;
}
white_point=(double) _image->columns*_image->rows-white_point;
(void) ContrastStretchImage(_image,black_point,white_point,
_exception);
break;
}
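/* Worked example (illustrative): for a 100x100 image (10000 pixels),
"-contrast-stretch 2%x1%" computes black_point=200 and white_point=9900
above, i.e. the darkest 200 and the brightest 100 pixels are clipped
before the remaining range is stretched. */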
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
kernel_info=AcquireKernelInfo(arg1,exception);
if (kernel_info == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
gamma=0.0;
for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++)
kernel_info->values[j]*=gamma;
new_image=MorphologyImage(_image,CorrelateMorphology,1,kernel_info,
_exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
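/* Normalization example (illustrative): a uniform 3x3 kernel
"3: 1,1,1 1,1,1 1,1,1" sums to 9, so each value is scaled by 1/9 above;
a zero-sum kernel (e.g. an edge detector) is left unscaled. */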
if (LocaleCompare("crop",option+1) == 0)
{
/* WARNING: This can generate multiple images! */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=CropImageToTiles(_image,arg1,_exception);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) CycleColormapImage(_image,(ssize_t) StringToLong(arg1),
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
StringInfo
*passkey;
passkey=FileToStringInfo(arg1,~0UL,_exception);
if (passkey == (StringInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) PasskeyDecipherImage(_image,passkey,_exception);
passkey=DestroyStringInfo(passkey);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
/* The _image_info->depth setting has already been set.
We just need to apply it to all images in the current sequence.
WARNING: Depth from 8 to 16 causes 'quantum rounding' of images!
That is, it really is an operation, not a setting! Arrgghhh
FUTURE: this should not be an operator!!!
*/
(void) SetImageDepth(_image,_image_info->depth,_exception);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
if (IfNormalOp) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0);
}
else
threshold=40.0*QuantumRange/100.0;
new_image=DeskewImage(_image,threshold,_exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
new_image=DespeckleImage(_image,_exception);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
double
*args;
ssize_t
count;
parse = ParseCommandOption(MagickDistortOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedDistortMethod",
option,arg1);
if ((DistortMethod) parse == ResizeDistortion)
{
double
resize_args[2];
/* Special Case - Argument is actually a resize geometry!
** Convert that to an appropriate distortion argument array.
** FUTURE: roll this into a separate special resize operator */
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidGeometry",
option,arg2);
(void) ParseRegionGeometry(_image,arg2,&geometry,_exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
new_image=DistortImage(_image,(DistortMethod) parse,
(size_t)2,resize_args,MagickTrue,_exception);
break;
}
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg2,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2);
new_image=DistortImage(_image,(DistortMethod) parse,(size_t)
count,args,IsPlusOp,_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
(void) CloneString(&_draw_info->primitive,arg1);
(void) DrawImage(_image,_draw_info,_exception);
(void) CloneString(&_draw_info->primitive,(char *) NULL);
break;
}
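/* Illustrative example (the primitive string is an assumption):
"-draw 'circle 50,50 50,10'" stores the primitive in
_draw_info->primitive above, which DrawImage() then renders onto the
image. */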
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=EdgeImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=EmbossImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
StringInfo
*passkey;
passkey=FileToStringInfo(arg1,~0UL,_exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(_image,passkey,_exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
new_image=EnhanceImage(_image,_exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
(void) EqualizeImage(_image,_exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
parse = ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
constant=StringToDoubleInterval(arg2,(double) QuantumRange+1.0);
(void) EvaluateImage(_image,(MagickEvaluateOperator)parse,constant,
_exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGravityGeometry(_image,arg1,&geometry,_exception);
if (geometry.width == 0)
geometry.width=_image->columns;
if (geometry.height == 0)
geometry.height=_image->rows;
new_image=ExtentImage(_image,&geometry,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("flip",option+1) == 0)
{
new_image=FlipImage(_image,_exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
new_image=FlopImage(_image,_exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
PixelInfo
target;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParsePageGeometry(_image,arg1,&geometry,_exception);
(void) QueryColorCompliance(arg2,AllCompliance,&target,_exception);
(void) FloodfillPaintImage(_image,_draw_info,&target,geometry.x,
geometry.y,IsPlusOp,_exception);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* use Over not _image->compose */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=_image->columns+2*frame_info.width;
frame_info.height=_image->rows+2*frame_info.height;
new_image=FrameImage(_image,&frame_info,compose,_exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
double
*args;
ssize_t
count;
parse=ParseCommandOption(MagickFunctionOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",
option,arg1);
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg2,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2);
(void) FunctionImage(_image,(MagickFunction)parse,(size_t) count,args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
double
constant;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
constant=StringToDouble(arg1,(char **) NULL);
#if 0
/* Using Gamma, via a cache */
if (IfPlusOp)
constant=PerceptibleReciprocal(constant);
(void) GammaImage(_image,constant,_exception);
#else
/* Using Evaluate POW, direct update of values - more accurate */
if (IfNormalOp)
constant=PerceptibleReciprocal(constant);
(void) EvaluateImage(_image,PowEvaluateOperator,constant,_exception);
_image->gamma*=StringToDouble(arg1,(char **) NULL);
#endif
/* Set gamma setting -- Old meaning of "+gamma"
* _image->gamma=StringToDouble(arg1,(char **) NULL);
*/
break;
}
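/* Worked example (illustrative): "-gamma 2.2" sets constant=1/2.2 above,
so each normalized channel value u becomes pow(u,1/2.2); "+gamma 2.2"
leaves constant=2.2 and so applies the inverse mapping pow(u,2.2). */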
if (LocaleCompare("gaussian-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=GaussianBlurImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("gaussian",option+1) == 0)
{
CLIWandWarnReplaced("-gaussian-blur");
(void) CLISimpleOperatorImage(cli_wand,"-gaussian-blur",arg1,NULL,exception);
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record Image offset for composition. (A Setting)
Resize last _image. (ListOperator) -- DEPRECATE
FUTURE: Why, if no 'offset' is given, does this resize ALL images?
Also, why is the setting recorded in the IMAGE? Nonsense!
*/
if (IfPlusOp)
{ /* remove the previous composition geometry offset! */
if (_image->geometry != (char *) NULL)
_image->geometry=DestroyString(_image->geometry);
break;
}
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&_image->geometry,arg1);
else
new_image=ResizeImage(_image,geometry.width,geometry.height,
_image->filter,_exception);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
parse=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityMethod",
option,arg1);
(void) GrayscaleImage(_image,(PixelIntensityMethod) parse,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hough-lines",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
new_image=HoughLineImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
const char
*format,
*text;
format=GetImageOption(_image_info,"format");
if (format == (char *) NULL)
{
(void) IdentifyImage(_image,stdout,_image_info->verbose,
_exception);
break;
}
text=InterpretImageProperties(_image_info,_image,format,_exception);
if (text == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
(void) fputs(text,stdout);
text=DestroyString((char *)text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ImplodeImage(_image,geometry_info.rho,_image->interpolate,
_exception);
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/* FUTURE: New to IMv7
Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=InterpolativeResizeImage(_image,geometry.width,
geometry.height,_image->interpolate,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'k':
{
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
new_image=KuwaharaImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=AdaptiveThresholdImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(double) geometry_info.xi,
_exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
double
black_point,
gamma,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(double) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (QuantumRange/100.0);
white_point*=(double) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(double) QuantumRange-black_point;
if (IfPlusOp || ((flags & AspectValue) != 0))
(void) LevelizeImage(_image,black_point,white_point,gamma,_exception);
else
(void) LevelImage(_image,black_point,white_point,gamma,_exception);
break;
}
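/* Worked example (illustrative): "-level 10%,90%" gives
black_point=0.10*QuantumRange and white_point=0.90*QuantumRange above,
linearly stretching that range to the full quantum range; "+level"
applies the inverse (LevelizeImage) mapping instead. */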
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
PixelInfo
black_point,
white_point;
p=(const char *) arg1;
GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&black_point,_exception);
else
(void) QueryColorCompliance("#000000",AllCompliance,
&black_point,_exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&white_point,_exception);
else
(void) QueryColorCompliance("#ffffff",AllCompliance,
&white_point,_exception);
}
(void) LevelImageColors(_image,&black_point,&white_point,
IsPlusOp,_exception);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(double) _image->columns*_image->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) _image->columns*_image->rows/100.0;
white_point*=(double) _image->columns*_image->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(double) _image->columns*_image->rows-
black_point;
(void) LinearStretchImage(_image,black_point,white_point,_exception);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
new_image=LiquidRescaleImage(_image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,_exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
new_image=LocalContrastImage(_image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
new_image=MagnifyImage(_image,_exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("-remap");
(void) CLISimpleOperatorImage(cli_wand,"-remap",NULL,NULL,exception);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
if (IfPlusOp)
{
/*
Remove a mask.
*/
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
CLIWandWarnReplaced(IfNormalOp?"-alpha Set":"-alpha Off");
(void) SetImageAlphaChannel(_image,IfNormalOp ? SetAlphaChannel :
DeactivateAlphaChannel, _exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=MeanShiftImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
CLIWandWarnReplaced("-statistic Median");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","Median",arg1,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/* FUTURE: note this is also a special "montage" option */
CLIWandWarnReplaced("-statistic Mode");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","Mode",arg1,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ModulateImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageProgressMonitor(_image, IfNormalOp ? MonitorProgress :
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SetImageType(_image,BilevelType,_exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
KernelInfo
*kernel;
ssize_t
iterations;
p=arg1;
GetNextToken(p,&p,MagickPathExtent,token);
parse=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",option,
arg1);
iterations=1L;
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p != '\0'))
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(arg2,exception);
if (kernel == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"UnabletoParseKernel",option,arg2);
new_image=MorphologyImage(_image,(MorphologyMethod)parse,iterations,
kernel,_exception);
kernel=DestroyKernelInfo(kernel);
break;
}
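/* Illustrative example (the argument strings are assumptions):
"-morphology Close:2 Diamond" takes the method (Close) and iteration
count (2) from arg1 above, then builds the kernel from arg2 ("Diamond")
via AcquireKernelInfo(). */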
if (LocaleCompare("motion-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=MotionBlurImage(_image,geometry_info.rho,geometry_info.sigma,
geometry_info.xi,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) NegateImage(_image, IsPlusOp, _exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
double
attenuate;
const char*
value;
if (IfNormalOp)
{
CLIWandWarnReplaced("-statistic NonPeak");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","NonPeak",arg1,exception);
break;
}
parse=ParseCommandOption(MagickNoiseOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedNoiseType",
option,arg1);
attenuate=1.0;
value=GetImageOption(_image_info,"attenuate");
if (value != (const char *) NULL)
attenuate=StringToDouble(value,(char **) NULL);
new_image=AddNoiseImage(_image,(NoiseType)parse,attenuate,
_exception);
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) NormalizeImage(_image,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
PixelInfo
target;
(void) QueryColorCompliance(arg1,AllCompliance,&target,_exception);
(void) OpaquePaintImage(_image,&target,&_draw_info->fill,IsPlusOp,
_exception);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) OrderedDitherImage(_image,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=OilPaintImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
(void) PerceptibleImage(_image,StringToDouble(arg1,(char **) NULL),
_exception);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
const char
*caption;
double
angle;
if (IfPlusOp) {
RandomInfo
*random_info;
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
}
else {
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
angle=geometry_info.rho;
}
caption=GetImageProperty(_image,"caption",_exception);
new_image=PolaroidImage(_image,_draw_info,caption,angle,
_image->interpolate,_exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) PosterizeImage(_image,(size_t) geometry_info.rho,
_quantize_info->dither_method,_exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/* FUTURE: should be a 'Genesis' option?
The option, however, also appears in WandSettingOptionInfo().
Why???
*/
parse=ParseCommandOption(MagickPreviewOptions, MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedPreviewType",
option,arg1);
new_image=PreviewImage(_image,(PreviewType)parse,_exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
/* Note: arguments do not have percent escapes expanded */
if (IfPlusOp)
{ /* Remove a profile from the _image. */
(void) ProfileImage(_image,arg1,(const unsigned char *)
NULL,0,_exception);
break;
}
/* Associate a profile with the _image. */
profile_info=CloneImageInfo(_image_info);
profile=GetImageProfile(_image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,arg1,_exception);
profile_info=DestroyImageInfo(profile_info);
if (profile_image == (Image *) NULL)
{
StringInfo
*profile;
profile_info=CloneImageInfo(_image_info);
(void) CopyMagickString(profile_info->filename,arg1,
MagickPathExtent);
profile=FileToStringInfo(profile_info->filename,~0UL,_exception);
if (profile != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,_exception);
(void) ProfileImage(_image,profile_info->magick,
GetStringInfoDatum(profile),(size_t)
GetStringInfoLength(profile),_exception);
profile=DestroyStringInfo(profile);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(_image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),_exception);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("raise",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
(void) RaiseImage(_image,&geometry,IsNormalOp,_exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
double
min_threshold,
max_threshold;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
min_threshold=0.0;
max_threshold=(double) QuantumRange;
flags=ParseGeometry(arg1,&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(arg1,'%') != (char *) NULL)
{
max_threshold*=(double) (0.01*QuantumRange);
min_threshold*=(double) (0.01*QuantumRange);
}
(void) RandomThresholdImage(_image,min_threshold,max_threshold,
_exception);
break;
}
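/* Worked example (illustrative): "-random-threshold 10%x90%" yields
min_threshold=0.10*QuantumRange and max_threshold=0.90*QuantumRange
above; without the percent sign the raw quantum values are used. */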
if (LocaleCompare("range-threshold",option+1) == 0)
{
/*
Range threshold image.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=geometry_info.sigma;
if ((flags & PsiValue) == 0)
geometry_info.psi=geometry_info.xi;
if (strchr(arg1,'%') != (char *) NULL)
{
geometry_info.rho*=(double) (0.01*QuantumRange);
geometry_info.sigma*=(double) (0.01*QuantumRange);
geometry_info.xi*=(double) (0.01*QuantumRange);
geometry_info.psi*=(double) (0.01*QuantumRange);
}
(void) RangeThresholdImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*mask;
if (IfPlusOp)
{ /* Remove a mask. */
(void) SetImageMask(_image,ReadPixelMask,(Image *) NULL,
_exception);
break;
}
/* Set the image mask. */
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,ReadPixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
CLIWandWarnReplaced("-color-matrix");
(void) CLISimpleOperatorImage(cli_wand,"-color-matrix",arg1,NULL,
exception);
}
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageRegionMask(_image,WritePixelMask,
(const RectangleInfo *) NULL,_exception);
break;
}
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseGravityGeometry(_image,arg1,&geometry,_exception);
(void) SetImageRegionMask(_image,WritePixelMask,&geometry,_exception);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*remap_image;
remap_image=GetImageCache(_image_info,arg1,_exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(_quantize_info,_image,remap_image,_exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
if (IfNormalOp)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
(void) ResetImagePage(_image,arg1);
}
else
(void) ParseAbsoluteGeometry("0x0+0+0",&_image->page);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/* FUTURE: Roll into a resize special operation */
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=ResampleImage(_image,geometry_info.rho,
geometry_info.sigma,_image->filter,_exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ResizeImage(_image,geometry.width,geometry.height,
_image->filter,_exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) _image->columns/100.0;
geometry.y*=(double) _image->rows/100.0;
}
new_image=RollImage(_image,geometry.x,geometry.y,_exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & GreaterValue) != 0 && (_image->columns <= _image->rows))
break;
if ((flags & LessValue) != 0 && (_image->columns >= _image->rows))
break;
new_image=RotateImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("rotational-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=RotationalBlurImage(_image,geometry_info.rho,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=SampleImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ScaleImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(_image,_image->colorspace,
_image_info->verbose,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=SelectiveBlurImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/* WARNING: This can generate multiple images! */
/* FUTURE - this may be replaced by a "-channel" method */
new_image=SeparateImages(_image,_exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=SepiaToneImage(_image,StringToDoubleInterval(arg1,
(double) QuantumRange+1.0),_exception);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if (((flags & RhoValue) == 0) || ((flags & SigmaValue) == 0))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ShadeImage(_image,IsNormalOp,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
new_image=ShadowImage(_image,geometry_info.rho,geometry_info.sigma,
(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),_exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
new_image=SharpenImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
new_image=ShaveImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=ShearImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
(void) SigmoidalContrastImage(_image,IsNormalOp,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=SketchImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SolarizeImage(_image,StringToDoubleInterval(arg1,(double)
QuantumRange+1.0),_exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
parse= ParseCommandOption(MagickSparseColorOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedSparseColorMethod",
option,arg1);
new_image=SparseColorOption(_image,(SparseColorMethod)parse,arg2,
_exception);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGravityGeometry(_image,arg1,&geometry,_exception);
new_image=SpliceImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
new_image=SpreadImage(_image,_image->interpolate,geometry_info.rho,
_exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
parse=ParseCommandOption(MagickStatisticOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedStatisticType",
option,arg1);
flags=ParseGeometry(arg2,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=StatisticImage(_image,(StatisticType)parse,
(size_t) geometry_info.rho,(size_t) geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
(void) StripImage(_image,_exception);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=SwirlImage(_image,geometry_info.rho,
_image->interpolate,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
threshold=(double) QuantumRange/2;
if (IfNormalOp) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0);
}
(void) BilevelImage(_image,threshold,_exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ThumbnailImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=TintImage(_image,arg1,&_draw_info->fill,_exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
CLIWandWarnReplaced("+distort AffineProjection");
new_image=AffineTransformImage(_image,&_draw_info->affine,_exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
PixelInfo
target;
(void) QueryColorCompliance(arg1,AllCompliance,&target,_exception);
(void) TransparentPaintImage(_image,&target,(Quantum)
TransparentAlpha,IsPlusOp,_exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
new_image=TransposeImage(_image,_exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
new_image=TransverseImage(_image,_exception);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
new_image=TrimImage(_image,_exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
/* Note that "type" setting should have already been defined */
(void) SetImageType(_image,_image_info->type,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'u':
{
if (LocaleCompare("unique",option+1) == 0)
{
/* FUTURE: move to SyncImageSettings() and AcquireImage()???
Option is not documented, but appears to be for "identify".
We may need an identify-specific verbose!
*/
if (IsPlusOp) {
(void) DeleteImageArtifact(_image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(_image,"identify:unique-colors","true");
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
new_image=UniqueImageColors(_image,_exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
new_image=UnsharpMaskImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,_exception);
break;
}
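/* Worked example (illustrative): "-unsharp 0x2" leaves rho=0 (automatic
radius) and sigma=2, with the gain (xi) defaulting to 1.0 and the
threshold (psi) to 0.05 as set above. */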
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
/* FUTURE: move to SyncImageSettings() and AcquireImage()???
three places! ImageArtifact ImageOption _image_info->verbose
Somehow new images also get this artifact!
*/
(void) SetImageArtifact(_image,option+1,
IfNormalOp ? "true" : "false" );
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*_image->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*_image->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) _image->columns/100.0;
geometry_info.psi*=(double) _image->rows/100.0;
}
new_image=VignetteImage(_image,geometry_info.rho,geometry_info.sigma,
(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=WaveImage(_image,geometry_info.rho,geometry_info.sigma,
_image->interpolate,_exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
new_image=WaveletDenoiseImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) WhiteThresholdImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*mask;
if (IfPlusOp)
{ /* Remove a mask. */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
/* Set the image mask. */
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* Replace the current image with any image that was generated, and set
the image pointer to the last image (so image->next is correct) */
if (new_image != (Image *) NULL)
ReplaceImageInListReturnLast(&_image,new_image);
return(MagickTrue);
#undef _image_info
#undef _draw_info
#undef _quantize_info
#undef _image
#undef _exception
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
#undef IsPlusOp
}
WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception)
{
#if !USE_WAND_METHODS
size_t
n,
i;
#endif
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- Simple Operator: %s \"%s\" \"%s\"", option,
arg1 == (const char *) NULL ? "null" : arg1,
arg2 == (const char *) NULL ? "null" : arg2);
#if !USE_WAND_METHODS
/* FUTURE add appropriate tracing */
i=0;
n=GetImageListLength(cli_wand->wand.images);
cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images);
while (1) {
i++;
CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception);
if ( cli_wand->wand.images->next == (Image *) NULL )
break;
cli_wand->wand.images=cli_wand->wand.images->next;
}
assert( i == n );
cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images);
#else
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
(void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception);
MagickResetIterator(&cli_wand->wand);
#endif
return(MagickTrue);
}
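/* A minimal usage sketch (illustrative only; assumes cli_wand already
holds one or more images and that exception is in scope): apply
"-resize 50%" to every image in the list.
*/
#if 0
{
(void) CLISimpleOperatorImages(cli_wand,"-resize","50%",
(const char *) NULL,exception);
}
#endif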
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I L i s t O p e r a t o r I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLIListOperatorImages() applies a single operation to the entire image
% list as a whole. The result is often a complete replacement of the image
% list with a completely new list, or with just a single image result.
%
% The format of the CLIListOperatorImages method is:
%
% MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
% const char *option,const char *arg1,const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings to be applied
%
% o option: The option string for the operation
%
% o arg1, arg2: optional argument strings to the operation
% arg2 is currently not used
%
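% Example Usage (an illustrative sketch only, following the calling
% convention shown above; it assumes the wand already holds two or more
% images, as each of these list operators requires):
%
%    CLIListOperatorImages(cli_wand,"-append",NULL,NULL);
%    CLIListOperatorImages(cli_wand,"-duplicate","2",NULL);
%    CLIListOperatorImages(cli_wand,"+swap",NULL,NULL);
%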
*/
WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
Image
*new_images;
MagickStatusType
status;
ssize_t
parse;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_images != (Image *) NULL); /* _images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- List Operator: %s \"%s\" \"%s\"", option,
arg1n == (const char *) NULL ? "null" : arg1n,
arg2n == (const char *) NULL ? "null" : arg2n);
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
status=MagickTrue;
new_images=NewImageList();
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("append",option+1) == 0)
{
new_images=AppendImages(_images,IsNormalOp,_exception);
break;
}
if (LocaleCompare("average",option+1) == 0)
{
CLIWandWarnReplaced("-evaluate-sequence Mean");
(void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean",
NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
new_images=ChannelFxImage(_images,arg1,_exception);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image;
/* FUTURE - make this a compose option, and thus can be used
with layers compose or even compose last image over all other
_images.
*/
new_images=RemoveFirstImageFromList(&_images);
clut_image=RemoveLastImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (clut_image == (Image *) NULL)
break;
(void) ClutImage(new_images,clut_image,new_images->interpolate,
_exception);
clut_image=DestroyImage(clut_image);
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
new_images=CoalesceImages(_images,_exception);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
parse=(ssize_t) _images->colorspace;
if (_images->number_channels < GetImageListLength(_images))
parse=sRGBColorspace;
if ( IfPlusOp )
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
new_images=CombineImages(_images,(ColorspaceType) parse,_exception);
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
image=RemoveFirstImageFromList(&_images);
reconstruct_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (reconstruct_image == (Image *) NULL)
{
image=DestroyImage(image);
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(_image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
new_images=CompareImages(image,reconstruct_image,metric,&distortion,
_exception);
(void) distortion;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=ComplexImages(_images,(ComplexOperator) parse,_exception);
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
value=GetImageOption(_image_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(_image_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(_image_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(&_images);
source_image=RemoveFirstImageFromList(&_images);
if (source_image == (Image *) NULL)
break; /* FUTURE - produce Exception, rather than silent fail */
/* FUTURE - this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,_exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,_exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity, &geometry);
mask_image=RemoveFirstImageFromList(&_images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,clip_to_self,
geometry.x,geometry.y,_exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
}
else
{
Image
*clone_image;
clone_image=CloneImage(new_images,0,0,MagickTrue,_exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(clone_image,new_images,OverCompositeOp,
clip_to_self,0,0,_exception);
new_images=DestroyImageList(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParsePageGeometry(_images,arg2,&geometry,_exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=_images;
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,arg1,&geometry,_exception);
(void) CopyImagePixels(_images,source_image,&geometry,&offset,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
CLIWandWarnReplaced("-layer CompareAny");
(void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (IfNormalOp)
DeleteImages(&_images,arg1,_exception);
else
DeleteImages(&_images,"-1",_exception);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (IfNormalOp)
{
const char
*p;
size_t
number_duplicates;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
number_duplicates=(size_t) StringToLong(arg1);
p=strchr(arg1,',');
if (p == (const char *) NULL)
new_images=DuplicateImages(_images,number_duplicates,"-1",
_exception);
else
new_images=DuplicateImages(_images,number_duplicates,p,
_exception);
}
else
new_images=DuplicateImages(_images,1,"-1",_exception);
AppendImageToList(&_images, new_images);
new_images=(Image *) NULL;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
new_images=ForwardFourierTransformImage(_images,IsNormalOp,
_exception);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
/* REDIRECTED to use -layers flatten instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
new_images=FxImage(_images,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
/* FUTURE - make this a compose option (and thus layers compose )
or perhaps compose last image over all other _images.
*/
Image
*hald_image;
new_images=RemoveFirstImageFromList(&_images);
hald_image=RemoveLastImageFromList(&_images);
if (hald_image == (Image *) NULL)
break;
(void) HaldClutImage(new_images,hald_image,_exception);
hald_image=DestroyImage(hald_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*magnitude_image,
*phase_image;
magnitude_image=RemoveFirstImageFromList(&_images);
phase_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (phase_image == (Image *) NULL)
break;
new_images=InverseFourierTransformImage(magnitude_image,phase_image,
IsNormalOp,_exception);
magnitude_image=DestroyImage(magnitude_image);
phase_image=DestroyImage(phase_image);
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*insert_image,
*index_image;
ssize_t
index;
if (IfNormalOp && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=0;
insert_image=RemoveLastImageFromList(&_images);
if (IfNormalOp)
index=(ssize_t) StringToLong(arg1);
index_image=insert_image;
if (index == 0)
PrependImageToList(&_images,insert_image);
else if (index == (ssize_t) GetImageListLength(_images))
AppendImageToList(&_images,insert_image);
else
{
index_image=GetImageFromList(_images,index-1);
if (index_image == (Image *) NULL)
{
insert_image=DestroyImage(insert_image);
CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1);
}
InsertImageInList(&index_image,insert_image);
}
_images=GetFirstImageInList(index_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod",
option,arg1);
switch ((LayerMethod) parse)
{
case CoalesceLayer:
{
new_images=CoalesceImages(_images,_exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
new_images=CompareImagesLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
new_images=MergeImageLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case DisposeLayer:
{
new_images=DisposeImages(_images,_exception);
break;
}
case OptimizeImageLayer:
{
new_images=OptimizeImageLayers(_images,_exception);
break;
}
case OptimizePlusLayer:
{
new_images=OptimizePlusImageLayers(_images,_exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(_images,_exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(&_images,_exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(&_images,_exception);
break;
}
case OptimizeLayer:
{ /* General Purpose, GIF Animation Optimizer. */
new_images=CoalesceImages(_images,_exception);
if (new_images == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=OptimizeImageLayers(new_images,_exception);
if (_images == (Image *) NULL)
break;
new_images=DestroyImageList(new_images);
OptimizeImageTransparency(_images,_exception);
(void) RemapImages(_quantize_info,_images,(Image *) NULL,
_exception);
break;
}
case CompositeLayer:
{
Image
*source;
RectangleInfo
geometry;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* Default to Over */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Split image sequence at the first 'NULL:' image. */
source=_images;
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{ /* Separate the two lists, junk the null: image. */
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
break;
}
/* Adjust offset with gravity and virtual canvas. */
SetGeometry(_images,&geometry);
(void) ParseAbsoluteGeometry(_images->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry(_images->page.width != 0 ?
_images->page.width : _images->columns,
_images->page.height != 0 ? _images->page.height :
_images->rows,_images->gravity,&geometry);
/* Compose the two image sequences together */
CompositeLayers(_images,compose,source,geometry.x,geometry.y,
_exception);
source=DestroyImageList(source);
break;
}
}
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("+remap");
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
morph_image=MorphImages(_images,StringToUnsignedLong(arg1),
_exception);
if (morph_image == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
/* REDIRECTED to use -layers mosaic instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
double
*args;
ssize_t
count;
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg1,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1);
new_images=PolynomialImage(_images,(size_t) (count >> 1),args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
/* FUTURE: better parsing using ScriptToken() from string ??? */
char
**arguments;
int
j,
number_arguments;
arguments=StringToArgv(arg1,&number_arguments);
if (arguments == (char **) NULL)
break;
if (strchr(arguments[1],'=') != (char *) NULL)
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg1".
*/
assert(arg1 != (const char *) NULL);
length=strlen(arg1);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=arg1;
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&_images,1,&argv,
_exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&_images,
number_arguments-2,(const char **) arguments+2,_exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("remap",option+1) == 0)
{
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(&_images);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
/* FUTURE: this option needs more work to make it better */
ssize_t
offset;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
offset=(ssize_t) StringToLong(arg1);
new_images=SmushImages(_images,IsNormalOp,offset,_exception);
break;
}
if (LocaleCompare("subimage",option+1) == 0)
{
Image
*base_image,
*compare_image;
const char
*value;
MetricType
metric;
double
similarity;
RectangleInfo
offset;
base_image=GetImageFromList(_images,0);
compare_image=GetImageFromList(_images,1);
/* Comparison Metric */
metric=UndefinedErrorMetric;
value=GetImageOption(_image_info,"metric");
if (value != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,value);
new_images=SimilarityImage(base_image,compare_image,metric,0.0,
&offset,&similarity,_exception);
if (new_images != (Image *) NULL)
{
char
result[MagickPathExtent];
(void) FormatLocaleString(result,MagickPathExtent,"%lf",
similarity);
(void) SetImageProperty(new_images,"subimage:similarity",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.x);
(void) SetImageProperty(new_images,"subimage:x",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.y);
(void) SetImageProperty(new_images,"subimage:y",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,
"%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long)
offset.height,(long) offset.x,(long) offset.y);
(void) SetImageProperty(new_images,"subimage:offset",result,
_exception);
}
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*swap;
ssize_t
index,
swap_index;
index=(-1);
swap_index=(-2);
if (IfNormalOp) {
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(_images,index);
q=GetImageFromList(_images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL)) {
if (IfNormalOp)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1)
else
CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option);
}
if (p == q)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1);
swap=CloneImage(p,0,0,MagickTrue,_exception);
if (swap == (Image *) NULL)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception));
ReplaceImageInList(&q,swap);
_images=GetFirstImageInList(q);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* if new image list generated, replace existing image list */
if (new_images == (Image *) NULL)
return(status == 0 ? MagickFalse : MagickTrue);
_images=DestroyImageList(_images);
_images=GetFirstImageInList(new_images);
return(status == 0 ? MagickFalse : MagickTrue);
#undef _image_info
#undef _images
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I N o I m a g e O p e r a t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLINoImageOperator() Applies operations that may not actually need images
% in an image list.
%
% The classic operator of this type is "-read", which actually creates
% images even when no images are present. Image stack operators are
% another example; they can be applied (push or pop) to an empty image list.
%
% Note that these operators may involve special 'option' prefix
% characters other than '-' or '+', namely parentheses and braces.
%
% The format of the CLINoImageOperator method is:
%
% void CLINoImageOperator(MagickCLI *cli_wand,const char *option,
% const char *arg1, const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: the main CLI Wand to use. (sometimes not required)
%
% o option: The special option (with any switch char) to process
%
% o arg1 & arg2: Argument for option, if required
% Currently arg2 is not used.
%
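% Example Usage (an illustrative sketch only; "rose:" is a built-in
% ImageMagick coder and "output.png" is a placeholder filename):
%
%    CLINoImageOperator(cli_wand,"(",NULL,NULL);
%    CLINoImageOperator(cli_wand,"-read","rose:",NULL);
%    CLINoImageOperator(cli_wand,")",NULL,NULL);
%    CLINoImageOperator(cli_wand,"-write","output.png",NULL);
%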
*/
WandPrivate void CLINoImageOperator(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- NoImage Operator: %s \"%s\" \"%s\"", option,
arg1n != (char *) NULL ? arg1n : "",
arg2n != (char *) NULL ? arg2n : "");
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
do { /* break to exit code */
/*
No-op options (ignore these)
*/
if (LocaleCompare("noop",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans0",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans1",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans2",option+1) == 0) /* two arguments */
break;
/*
Image Reading
*/
if ( ( LocaleCompare("read",option+1) == 0 ) ||
( LocaleCompare("--",option) == 0 ) ) {
/* Do Glob filename Expansion for 'arg1' then read all images.
*
* Expansion handles '@', '~', '*', and '?' meta-characters while ignoring
* (but attaching to the filenames in the generated argument list) any
* [...] read modifiers that may be present.
*
* For example: It will expand '*.gif[20x20]' into a list such as
* 'abc.gif[20x20]', 'foobar.gif[20x20]', 'xyzzy.gif[20x20]'
*
* NOTE: In IMv6 this was done globally across all images. This
* meant you could include IM options in '@filename' lists, but you
* could not include comments. Doing it only for image read makes
* it far more secure.
*
* Note: arguments do not have percent escapes expanded for security
* reasons.
*/
int argc;
char **argv;
ssize_t i;
argc = 1;
argv = (char **) &arg1;
/* Expand 'glob' expressions in the given filename.
Expansion handles any 'coder:' prefix, or read modifiers attached
to the filename, including them in the resulting expanded list.
*/
if (ExpandFilenames(&argc,&argv) == MagickFalse)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
/* loop over the expanded filename list, and read them all in */
for (i=0; i < (ssize_t) argc; i++) {
Image *
new_images;
if (_image_info->ping != MagickFalse)
new_images=PingImages(_image_info,argv[i],_exception);
else
new_images=ReadImages(_image_info,argv[i],_exception);
AppendImageToList(&_images, new_images);
argv[i]=DestroyString(argv[i]);
}
argv=(char **) RelinquishMagickMemory(argv);
break;
}
/*
Image Writing
Note: Writing an empty image list is valid in specific cases
*/
if (LocaleCompare("write",option+1) == 0) {
/* Note: arguments do not have percent escapes expanded */
char
key[MagickPathExtent];
Image
*write_images;
ImageInfo
*write_info;
/* Need images, unless a "null:" output coder is used */
if ( _images == (Image *) NULL ) {
if ( LocaleCompare(arg1,"null:") == 0 )
break;
CLIWandExceptArgBreak(OptionError,"NoImagesForWrite",option,arg1);
}
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",arg1);
(void) DeleteImageRegistry(key);
write_images=_images;
if (IfPlusOp)
write_images=CloneImageList(_images,_exception);
write_info=CloneImageInfo(_image_info);
(void) WriteImages(write_info,write_images,arg1,_exception);
write_info=DestroyImageInfo(write_info);
if (IfPlusOp)
write_images=DestroyImageList(write_images);
break;
}
/*
Parenthesis and Brace operations
*/
if (LocaleCompare("(",option) == 0) {
/* stack 'push' images */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_list_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"ParenthesisNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.images;
node->next = cli_wand->image_list_stack;
cli_wand->image_list_stack = node;
cli_wand->wand.images = NewImageList();
/* handle respect-parenthesis */
if (IsStringTrue(GetImageOption(cli_wand->wand.image_info,
"respect-parenthesis")) != MagickFalse)
option="{"; /* fall-thru so as to push image settings too */
else
break;
/* fall thru to operation */
}
if (LocaleCompare("{",option) == 0) {
/* stack 'push' of image_info settings */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_info_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"CurlyBracesNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.image_info;
node->next = cli_wand->image_info_stack;
cli_wand->image_info_stack = node;
cli_wand->wand.image_info = CloneImageInfo(cli_wand->wand.image_info);
if (cli_wand->wand.image_info == (ImageInfo *) NULL) {
CLIWandException(ResourceLimitFatalError,"MemoryAllocationFailed",
option);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
break;
}
break;
}
if (LocaleCompare(")",option) == 0) {
/* pop images from stack */
Stack
*node;
node = (Stack *)cli_wand->image_list_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedParenthesis",option);
cli_wand->image_list_stack = node->next;
AppendImageToList((Image **)&node->data,cli_wand->wand.images);
cli_wand->wand.images= (Image *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
/* handle respect-parenthesis - of the previous 'pushed' settings */
node = cli_wand->image_info_stack;
if ( node != (Stack *) NULL)
{
if (IsStringTrue(GetImageOption(
cli_wand->wand.image_info,"respect-parenthesis")) != MagickFalse)
option="}"; /* fall-thru so as to pop image settings too */
else
break;
}
else
break;
/* fall thru to next if */
}
if (LocaleCompare("}",option) == 0) {
/* pop image_info settings from stack */
Stack
*node;
node = (Stack *)cli_wand->image_info_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedCurlyBraces",option);
cli_wand->image_info_stack = node->next;
(void) DestroyImageInfo(cli_wand->wand.image_info);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
GetDrawInfo(cli_wand->wand.image_info, cli_wand->draw_info);
cli_wand->quantize_info=DestroyQuantizeInfo(cli_wand->quantize_info);
cli_wand->quantize_info=AcquireQuantizeInfo(cli_wand->wand.image_info);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
(void) FormatLocaleFile(stdout,"%s",arg1);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
/* Settings are applied to each image in memory in turn (if any).
While an option: setting only needs to be applied once, globally.
NOTE: arguments have not been automatically percent expanded
*/
/* escape the 'key' once only, using first image. */
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
if (LocaleNCompare(arg1,"registry:",9) == 0)
{
if (IfPlusOp)
{
(void) DeleteImageRegistry(arg1+9);
arg1=DestroyString((char *)arg1);
break;
}
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
arg1=DestroyString((char *)arg1);
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
}
(void) SetImageRegistry(StringRegistryType,arg1+9,arg2,_exception);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
if (LocaleNCompare(arg1,"option:",7) == 0)
{
/* delete the equivalent artifact from all images (if any) */
if (_images != (Image *) NULL)
{
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
(void) DeleteImageArtifact(_images,arg1+7);
MagickResetIterator(&cli_wand->wand);
}
/* now set/delete the global option as needed */
/* FUTURE: make escapes in a global 'option:' delayed */
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
(void) SetImageOption(_image_info,arg1+7,arg2);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
/* Set Artifacts/Properties/Attributes on all images (required) */
if ( _images == (Image *) NULL )
CLIWandExceptArgBreak(OptionWarning,"NoImageForProperty",option,arg1);
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
{
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
if (LocaleNCompare(arg1,"artifact:",9) == 0)
(void) SetImageArtifact(_images,arg1+9,arg2);
else if (LocaleNCompare(arg1,"property:",9) == 0)
(void) SetImageProperty(_images,arg1+9,arg2,_exception);
else
(void) SetImageProperty(_images,arg1,arg2,_exception);
arg2=DestroyString((char *)arg2);
}
MagickResetIterator(&cli_wand->wand);
arg1=DestroyString((char *)arg1);
break;
}
if (LocaleCompare("clone",option+1) == 0) {
Image
*new_images;
if (*option == '+')
arg1=AcquireString("-1");
if (IsSceneGeometry(arg1,MagickFalse) == MagickFalse)
CLIWandExceptionBreak(OptionError,"InvalidArgument",option);
if ( cli_wand->image_list_stack == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images = (Image *)cli_wand->image_list_stack->data;
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images=CloneImages(new_images,arg1,_exception);
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"NoSuchImage",option);
AppendImageToList(&_images,new_images);
break;
}
/*
Informational Operations.
Note that these do not require either a cli-wand or images!
Though currently a cli-wand must be provided regardless.
*/
if (LocaleCompare("version",option+1) == 0)
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("list",option+1) == 0) {
/*
FUTURE: This 'switch' should really be part of MagickCore
*/
ssize_t
list;
list=ParseCommandOption(MagickListOptions,MagickFalse,arg1);
if ( list < 0 ) {
CLIWandExceptionArg(OptionError,"UnrecognizedListType",option,arg1);
break;
}
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,_exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,_exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,_exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,_exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,_exception);
break;
}
case MagickFormatOptions:
(void) ListMagickInfo((FILE *) NULL,_exception);
break;
case MagickLocaleOptions:
(void) ListLocaleInfo((FILE *) NULL,_exception);
break;
case MagickLogOptions:
(void) ListLogInfo((FILE *) NULL,_exception);
break;
case MagickMagicOptions:
(void) ListMagicInfo((FILE *) NULL,_exception);
break;
case MagickMimeOptions:
(void) ListMimeInfo((FILE *) NULL,_exception);
break;
case MagickModuleOptions:
(void) ListModuleInfo((FILE *) NULL,_exception);
break;
case MagickPolicyOptions:
(void) ListPolicyInfo((FILE *) NULL,_exception);
break;
case MagickResourceOptions:
(void) ListMagickResourceInfo((FILE *) NULL,_exception);
break;
case MagickThresholdOptions:
(void) ListThresholdMaps((FILE *) NULL,_exception);
break;
default:
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
_exception);
break;
}
break;
}
CLIWandException(OptionError,"UnrecognizedOption",option);
DisableMSCWarning(4127)
} while (0); /* break to exit code. */
RestoreMSCWarning
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
#undef _image_info
#undef _images
#undef _exception
#undef IfNormalOp
#undef IfPlusOp
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLIOption() Processes the given option using the given CLI Magick Wand.
% The option arguments can be variable in number, though at this time no
% more than two are actually used by any option (this may change). Excess
% arguments are simply ignored.
%
% If the cli_wand->command pointer is non-null, then it is assumed that the
% option has already been looked up in the CommandOptions[] table in
% "MagickCore/options.c" using GetCommandOptionInfo(). If not set, this
% routine will do the lookup instead. The pointer is reset afterward.
%
% This action allows the caller to look up and pre-handle any 'special'
% options, (such as implicit reads) before calling this general option
% handler to deal with 'standard' command line options.
%
% The format of the CLIOption method is:
%
% void CLIOption(MagickCLI *cli_wand,const char *option, ...)
%
% A description of each parameter follows:
%
% o cli_wand: the main CLI Wand to use.
%
% o option: The special option (with any switch char) to process
%
% o args: any required arguments for an option (variable number)
%
% Example Usage...
%
% CLIOption(cli_wand,"-read","rose:");
% CLIOption(cli_wand,"-virtual-pixel","transparent");
% CLIOption(cli_wand,"-distort","SRT:","30");
% CLIOption(cli_wand,"-write","rotated_rose.png");
%
*/
WandExport void CLIOption(MagickCLI *cli_wand,const char *option,...)
{
const char /* extracted option args from args */
*arg1,
*arg2;
CommandOptionFlags
option_type;
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
do { /* Break Code Block for error handling */
/* get information about option */
if ( cli_wand->command == (const OptionInfo *) NULL )
cli_wand->command = GetCommandOptionInfo(option);
#if 0
(void) FormatLocaleFile(stderr, "CLIOption \"%s\" matched \"%s\"\n",
option, cli_wand->command->mnemonic );
#endif
option_type=(CommandOptionFlags) cli_wand->command->flags;
if ( option_type == UndefinedOptionFlag )
CLIWandExceptionReturn(OptionFatalError,"UnrecognizedOption",option);
assert( LocaleCompare(cli_wand->command->mnemonic,option) == 0 );
/* deprecated options */
if ( (option_type & DeprecateOptionFlag) != 0 )
CLIWandExceptionBreak(OptionError,"DeprecatedOptionNoCode",option);
/* options that this module does not handle */
if ((option_type & (SpecialOptionFlag|GenesisOptionFlag)) != 0 )
CLIWandExceptionBreak(OptionFatalError,"InvalidUseOfOption",option);
/* Get argument strings from VarArgs
How can you determine if enough arguments were supplied?
What happens if not enough arguments were supplied?
*/
{ size_t
count = (size_t) cli_wand->command->type;
va_list
operands;
va_start(operands,option);
arg1=arg2=NULL;
if ( count >= 1 )
arg1=(const char *) va_arg(operands, const char *);
if ( count >= 2 )
arg2=(const char *) va_arg(operands, const char *);
va_end(operands);
#if 0
(void) FormatLocaleFile(stderr,
"CLIOption: \"%s\" Count: %ld Flags: %04x Args: \"%s\" \"%s\"\n",
option,(long) count,option_type,arg1,arg2);
#endif
}
/*
Call the appropriate option handler
*/
/* FUTURE: this is temporary - get 'settings' to handle distribution of
settings to image attributes, properties, and artifacts */
if ( cli_wand->wand.images != (Image *) NULL )
(void) SyncImagesSettings(cli_wand->wand.image_info,cli_wand->wand.images,
cli_wand->wand.exception);
if ( (option_type & SettingOptionFlags) != 0 ) {
CLISettingOptionInfo(cli_wand, option, arg1, arg2);
/*
FUTURE: Sync Specific Settings into Image Properties (not global)
*/
}
/* Operators that do not need images - read, write, stack, clone */
if ((option_type & NoImageOperatorFlag) != 0)
CLINoImageOperator(cli_wand, option, arg1, arg2);
/* FUTURE: The 'not a setting' check below is a temporary hack due to
* some options being both a Setting and a Simple operator.
* Specifically -monitor, -depth, and -colorspace */
if ( cli_wand->wand.images == (Image *) NULL )
if ( ((option_type & (SimpleOperatorFlag|ListOperatorFlag)) != 0 ) &&
((option_type & SettingOptionFlags) == 0 )) /* temp hack */
CLIWandExceptionBreak(OptionError,"NoImagesFound",option);
/* Operators which loop over individual images, simply */
if ( (option_type & SimpleOperatorFlag) != 0 &&
cli_wand->wand.images != (Image *) NULL) /* temp hack */
{
ExceptionInfo *exception=AcquireExceptionInfo();
(void) CLISimpleOperatorImages(cli_wand, option, arg1, arg2,exception);
exception=DestroyExceptionInfo(exception);
}
/* Operators that work on the image list as a whole */
if ( (option_type & ListOperatorFlag) != 0 )
(void) CLIListOperatorImages(cli_wand, option, arg1, arg2);
DisableMSCWarning(4127)
} while (0); /* end Break code block */
RestoreMSCWarning
cli_wand->command = (const OptionInfo *) NULL; /* prevent re-use later */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD DDDD SSSSS %
% D D D D SS %
% D D D D SSS %
% D D D D SS %
% DDDD DDDD SSSSS %
% %
% %
% Read/Write Microsoft Direct Draw Surface Image Format %
% %
% Software Design %
% Bianca van Schaik %
% March 2008 %
% Dirk Lemstra %
% September 2013 %
% %
% %
% Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/attribute.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/log.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/profile.h"
#include "magick/quantum.h"
#include "magick/quantum-private.h"
#include "magick/resource_.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/module.h"
#include "magick/transform.h"
/*
Definitions
*/
#define DDSD_CAPS 0x00000001
#define DDSD_HEIGHT 0x00000002
#define DDSD_WIDTH 0x00000004
#define DDSD_PITCH 0x00000008
#define DDSD_PIXELFORMAT 0x00001000
#define DDSD_MIPMAPCOUNT 0x00020000
#define DDSD_LINEARSIZE 0x00080000
#define DDSD_DEPTH 0x00800000
#define DDPF_ALPHAPIXELS 0x00000001
#define DDPF_FOURCC 0x00000004
#define DDPF_RGB 0x00000040
#define DDPF_LUMINANCE 0x00020000
#define FOURCC_DXT1 0x31545844
#define FOURCC_DXT3 0x33545844
#define FOURCC_DXT5 0x35545844
#define DDSCAPS_COMPLEX 0x00000008
#define DDSCAPS_TEXTURE 0x00001000
#define DDSCAPS_MIPMAP 0x00400000
#define DDSCAPS2_CUBEMAP 0x00000200
#define DDSCAPS2_CUBEMAP_POSITIVEX 0x00000400
#define DDSCAPS2_CUBEMAP_NEGATIVEX 0x00000800
#define DDSCAPS2_CUBEMAP_POSITIVEY 0x00001000
#define DDSCAPS2_CUBEMAP_NEGATIVEY 0x00002000
#define DDSCAPS2_CUBEMAP_POSITIVEZ 0x00004000
#define DDSCAPS2_CUBEMAP_NEGATIVEZ 0x00008000
#define DDSCAPS2_VOLUME 0x00200000
#ifndef SIZE_MAX
#define SIZE_MAX ((size_t) -1)
#endif
/*
Structure declarations.
*/
typedef struct _DDSPixelFormat
{
size_t
flags,
fourcc,
rgb_bitcount,
r_bitmask,
g_bitmask,
b_bitmask,
alpha_bitmask;
} DDSPixelFormat;
typedef struct _DDSInfo
{
size_t
flags,
height,
width,
pitchOrLinearSize,
depth,
mipmapcount,
ddscaps1,
ddscaps2;
DDSPixelFormat
pixelformat;
} DDSInfo;
typedef struct _DDSColors
{
unsigned char
r[4],
g[4],
b[4],
a[4];
} DDSColors;
typedef struct _DDSVector4
{
float
x,
y,
z,
w;
} DDSVector4;
typedef struct _DDSVector3
{
float
x,
y,
z;
} DDSVector3;
typedef struct _DDSSourceBlock
{
unsigned char
start,
end,
error;
} DDSSourceBlock;
typedef struct _DDSSingleColourLookup
{
DDSSourceBlock sources[2];
} DDSSingleColourLookup;
typedef MagickBooleanType
DDSDecoder(Image *, DDSInfo *, ExceptionInfo *);
static const DDSSingleColourLookup DDSLookup_5_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 1 } } },
{ { { 0, 0, 2 }, { 0, 1, 0 } } },
{ { { 0, 0, 3 }, { 0, 1, 1 } } },
{ { { 0, 0, 4 }, { 0, 2, 1 } } },
{ { { 1, 0, 3 }, { 0, 2, 0 } } },
{ { { 1, 0, 2 }, { 0, 2, 1 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 1, 2, 1 } } },
{ { { 1, 0, 2 }, { 1, 2, 0 } } },
{ { { 1, 0, 3 }, { 0, 4, 0 } } },
{ { { 1, 0, 4 }, { 0, 5, 1 } } },
{ { { 2, 0, 3 }, { 0, 5, 0 } } },
{ { { 2, 0, 2 }, { 0, 5, 1 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 2, 3, 1 } } },
{ { { 2, 0, 2 }, { 2, 3, 0 } } },
{ { { 2, 0, 3 }, { 0, 7, 0 } } },
{ { { 2, 0, 4 }, { 1, 6, 1 } } },
{ { { 3, 0, 3 }, { 1, 6, 0 } } },
{ { { 3, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 2 }, { 0, 10, 1 } } },
{ { { 3, 0, 3 }, { 0, 10, 0 } } },
{ { { 3, 0, 4 }, { 2, 7, 1 } } },
{ { { 4, 0, 4 }, { 2, 7, 0 } } },
{ { { 4, 0, 3 }, { 0, 11, 0 } } },
{ { { 4, 0, 2 }, { 1, 10, 1 } } },
{ { { 4, 0, 1 }, { 1, 10, 0 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 1 } } },
{ { { 4, 0, 2 }, { 0, 13, 0 } } },
{ { { 4, 0, 3 }, { 0, 13, 1 } } },
{ { { 4, 0, 4 }, { 0, 14, 1 } } },
{ { { 5, 0, 3 }, { 0, 14, 0 } } },
{ { { 5, 0, 2 }, { 2, 11, 1 } } },
{ { { 5, 0, 1 }, { 2, 11, 0 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 1, 14, 1 } } },
{ { { 5, 0, 2 }, { 1, 14, 0 } } },
{ { { 5, 0, 3 }, { 0, 16, 0 } } },
{ { { 5, 0, 4 }, { 0, 17, 1 } } },
{ { { 6, 0, 3 }, { 0, 17, 0 } } },
{ { { 6, 0, 2 }, { 0, 17, 1 } } },
{ { { 6, 0, 1 }, { 0, 18, 1 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 2, 15, 1 } } },
{ { { 6, 0, 2 }, { 2, 15, 0 } } },
{ { { 6, 0, 3 }, { 0, 19, 0 } } },
{ { { 6, 0, 4 }, { 1, 18, 1 } } },
{ { { 7, 0, 3 }, { 1, 18, 0 } } },
{ { { 7, 0, 2 }, { 0, 20, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 21, 1 } } },
{ { { 7, 0, 2 }, { 0, 22, 1 } } },
{ { { 7, 0, 3 }, { 0, 22, 0 } } },
{ { { 7, 0, 4 }, { 2, 19, 1 } } },
{ { { 8, 0, 4 }, { 2, 19, 0 } } },
{ { { 8, 0, 3 }, { 0, 23, 0 } } },
{ { { 8, 0, 2 }, { 1, 22, 1 } } },
{ { { 8, 0, 1 }, { 1, 22, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 1 } } },
{ { { 8, 0, 2 }, { 0, 25, 0 } } },
{ { { 8, 0, 3 }, { 0, 25, 1 } } },
{ { { 8, 0, 4 }, { 0, 26, 1 } } },
{ { { 9, 0, 3 }, { 0, 26, 0 } } },
{ { { 9, 0, 2 }, { 2, 23, 1 } } },
{ { { 9, 0, 1 }, { 2, 23, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 1, 26, 1 } } },
{ { { 9, 0, 2 }, { 1, 26, 0 } } },
{ { { 9, 0, 3 }, { 0, 28, 0 } } },
{ { { 9, 0, 4 }, { 0, 29, 1 } } },
{ { { 10, 0, 3 }, { 0, 29, 0 } } },
{ { { 10, 0, 2 }, { 0, 29, 1 } } },
{ { { 10, 0, 1 }, { 0, 30, 1 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 2, 27, 1 } } },
{ { { 10, 0, 2 }, { 2, 27, 0 } } },
{ { { 10, 0, 3 }, { 0, 31, 0 } } },
{ { { 10, 0, 4 }, { 1, 30, 1 } } },
{ { { 11, 0, 3 }, { 1, 30, 0 } } },
{ { { 11, 0, 2 }, { 4, 24, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 0 }, { 1, 31, 0 } } },
{ { { 11, 0, 1 }, { 1, 31, 1 } } },
{ { { 11, 0, 2 }, { 2, 30, 1 } } },
{ { { 11, 0, 3 }, { 2, 30, 0 } } },
{ { { 11, 0, 4 }, { 2, 31, 1 } } },
{ { { 12, 0, 4 }, { 2, 31, 0 } } },
{ { { 12, 0, 3 }, { 4, 27, 0 } } },
{ { { 12, 0, 2 }, { 3, 30, 1 } } },
{ { { 12, 0, 1 }, { 3, 30, 0 } } },
{ { { 12, 0, 0 }, { 4, 28, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 1 } } },
{ { { 12, 0, 2 }, { 3, 31, 0 } } },
{ { { 12, 0, 3 }, { 3, 31, 1 } } },
{ { { 12, 0, 4 }, { 4, 30, 1 } } },
{ { { 13, 0, 3 }, { 4, 30, 0 } } },
{ { { 13, 0, 2 }, { 6, 27, 1 } } },
{ { { 13, 0, 1 }, { 6, 27, 0 } } },
{ { { 13, 0, 0 }, { 4, 31, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 1 } } },
{ { { 13, 0, 2 }, { 5, 30, 0 } } },
{ { { 13, 0, 3 }, { 8, 24, 0 } } },
{ { { 13, 0, 4 }, { 5, 31, 1 } } },
{ { { 14, 0, 3 }, { 5, 31, 0 } } },
{ { { 14, 0, 2 }, { 5, 31, 1 } } },
{ { { 14, 0, 1 }, { 6, 30, 1 } } },
{ { { 14, 0, 0 }, { 6, 30, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 1 } } },
{ { { 14, 0, 2 }, { 6, 31, 0 } } },
{ { { 14, 0, 3 }, { 8, 27, 0 } } },
{ { { 14, 0, 4 }, { 7, 30, 1 } } },
{ { { 15, 0, 3 }, { 7, 30, 0 } } },
{ { { 15, 0, 2 }, { 8, 28, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 0 }, { 7, 31, 0 } } },
{ { { 15, 0, 1 }, { 7, 31, 1 } } },
{ { { 15, 0, 2 }, { 8, 30, 1 } } },
{ { { 15, 0, 3 }, { 8, 30, 0 } } },
{ { { 15, 0, 4 }, { 10, 27, 1 } } },
{ { { 16, 0, 4 }, { 10, 27, 0 } } },
{ { { 16, 0, 3 }, { 8, 31, 0 } } },
{ { { 16, 0, 2 }, { 9, 30, 1 } } },
{ { { 16, 0, 1 }, { 9, 30, 0 } } },
{ { { 16, 0, 0 }, { 12, 24, 0 } } },
{ { { 16, 0, 1 }, { 9, 31, 1 } } },
{ { { 16, 0, 2 }, { 9, 31, 0 } } },
{ { { 16, 0, 3 }, { 9, 31, 1 } } },
{ { { 16, 0, 4 }, { 10, 30, 1 } } },
{ { { 17, 0, 3 }, { 10, 30, 0 } } },
{ { { 17, 0, 2 }, { 10, 31, 1 } } },
{ { { 17, 0, 1 }, { 10, 31, 0 } } },
{ { { 17, 0, 0 }, { 12, 27, 0 } } },
{ { { 17, 0, 1 }, { 11, 30, 1 } } },
{ { { 17, 0, 2 }, { 11, 30, 0 } } },
{ { { 17, 0, 3 }, { 12, 28, 0 } } },
{ { { 17, 0, 4 }, { 11, 31, 1 } } },
{ { { 18, 0, 3 }, { 11, 31, 0 } } },
{ { { 18, 0, 2 }, { 11, 31, 1 } } },
{ { { 18, 0, 1 }, { 12, 30, 1 } } },
{ { { 18, 0, 0 }, { 12, 30, 0 } } },
{ { { 18, 0, 1 }, { 14, 27, 1 } } },
{ { { 18, 0, 2 }, { 14, 27, 0 } } },
{ { { 18, 0, 3 }, { 12, 31, 0 } } },
{ { { 18, 0, 4 }, { 13, 30, 1 } } },
{ { { 19, 0, 3 }, { 13, 30, 0 } } },
{ { { 19, 0, 2 }, { 16, 24, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 0 }, { 13, 31, 0 } } },
{ { { 19, 0, 1 }, { 13, 31, 1 } } },
{ { { 19, 0, 2 }, { 14, 30, 1 } } },
{ { { 19, 0, 3 }, { 14, 30, 0 } } },
{ { { 19, 0, 4 }, { 14, 31, 1 } } },
{ { { 20, 0, 4 }, { 14, 31, 0 } } },
{ { { 20, 0, 3 }, { 16, 27, 0 } } },
{ { { 20, 0, 2 }, { 15, 30, 1 } } },
{ { { 20, 0, 1 }, { 15, 30, 0 } } },
{ { { 20, 0, 0 }, { 16, 28, 0 } } },
{ { { 20, 0, 1 }, { 15, 31, 1 } } },
{ { { 20, 0, 2 }, { 15, 31, 0 } } },
{ { { 20, 0, 3 }, { 15, 31, 1 } } },
{ { { 20, 0, 4 }, { 16, 30, 1 } } },
{ { { 21, 0, 3 }, { 16, 30, 0 } } },
{ { { 21, 0, 2 }, { 18, 27, 1 } } },
{ { { 21, 0, 1 }, { 18, 27, 0 } } },
{ { { 21, 0, 0 }, { 16, 31, 0 } } },
{ { { 21, 0, 1 }, { 17, 30, 1 } } },
{ { { 21, 0, 2 }, { 17, 30, 0 } } },
{ { { 21, 0, 3 }, { 20, 24, 0 } } },
{ { { 21, 0, 4 }, { 17, 31, 1 } } },
{ { { 22, 0, 3 }, { 17, 31, 0 } } },
{ { { 22, 0, 2 }, { 17, 31, 1 } } },
{ { { 22, 0, 1 }, { 18, 30, 1 } } },
{ { { 22, 0, 0 }, { 18, 30, 0 } } },
{ { { 22, 0, 1 }, { 18, 31, 1 } } },
{ { { 22, 0, 2 }, { 18, 31, 0 } } },
{ { { 22, 0, 3 }, { 20, 27, 0 } } },
{ { { 22, 0, 4 }, { 19, 30, 1 } } },
{ { { 23, 0, 3 }, { 19, 30, 0 } } },
{ { { 23, 0, 2 }, { 20, 28, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 0 }, { 19, 31, 0 } } },
{ { { 23, 0, 1 }, { 19, 31, 1 } } },
{ { { 23, 0, 2 }, { 20, 30, 1 } } },
{ { { 23, 0, 3 }, { 20, 30, 0 } } },
{ { { 23, 0, 4 }, { 22, 27, 1 } } },
{ { { 24, 0, 4 }, { 22, 27, 0 } } },
{ { { 24, 0, 3 }, { 20, 31, 0 } } },
{ { { 24, 0, 2 }, { 21, 30, 1 } } },
{ { { 24, 0, 1 }, { 21, 30, 0 } } },
{ { { 24, 0, 0 }, { 24, 24, 0 } } },
{ { { 24, 0, 1 }, { 21, 31, 1 } } },
{ { { 24, 0, 2 }, { 21, 31, 0 } } },
{ { { 24, 0, 3 }, { 21, 31, 1 } } },
{ { { 24, 0, 4 }, { 22, 30, 1 } } },
{ { { 25, 0, 3 }, { 22, 30, 0 } } },
{ { { 25, 0, 2 }, { 22, 31, 1 } } },
{ { { 25, 0, 1 }, { 22, 31, 0 } } },
{ { { 25, 0, 0 }, { 24, 27, 0 } } },
{ { { 25, 0, 1 }, { 23, 30, 1 } } },
{ { { 25, 0, 2 }, { 23, 30, 0 } } },
{ { { 25, 0, 3 }, { 24, 28, 0 } } },
{ { { 25, 0, 4 }, { 23, 31, 1 } } },
{ { { 26, 0, 3 }, { 23, 31, 0 } } },
{ { { 26, 0, 2 }, { 23, 31, 1 } } },
{ { { 26, 0, 1 }, { 24, 30, 1 } } },
{ { { 26, 0, 0 }, { 24, 30, 0 } } },
{ { { 26, 0, 1 }, { 26, 27, 1 } } },
{ { { 26, 0, 2 }, { 26, 27, 0 } } },
{ { { 26, 0, 3 }, { 24, 31, 0 } } },
{ { { 26, 0, 4 }, { 25, 30, 1 } } },
{ { { 27, 0, 3 }, { 25, 30, 0 } } },
{ { { 27, 0, 2 }, { 28, 24, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 0 }, { 25, 31, 0 } } },
{ { { 27, 0, 1 }, { 25, 31, 1 } } },
{ { { 27, 0, 2 }, { 26, 30, 1 } } },
{ { { 27, 0, 3 }, { 26, 30, 0 } } },
{ { { 27, 0, 4 }, { 26, 31, 1 } } },
{ { { 28, 0, 4 }, { 26, 31, 0 } } },
{ { { 28, 0, 3 }, { 28, 27, 0 } } },
{ { { 28, 0, 2 }, { 27, 30, 1 } } },
{ { { 28, 0, 1 }, { 27, 30, 0 } } },
{ { { 28, 0, 0 }, { 28, 28, 0 } } },
{ { { 28, 0, 1 }, { 27, 31, 1 } } },
{ { { 28, 0, 2 }, { 27, 31, 0 } } },
{ { { 28, 0, 3 }, { 27, 31, 1 } } },
{ { { 28, 0, 4 }, { 28, 30, 1 } } },
{ { { 29, 0, 3 }, { 28, 30, 0 } } },
{ { { 29, 0, 2 }, { 30, 27, 1 } } },
{ { { 29, 0, 1 }, { 30, 27, 0 } } },
{ { { 29, 0, 0 }, { 28, 31, 0 } } },
{ { { 29, 0, 1 }, { 29, 30, 1 } } },
{ { { 29, 0, 2 }, { 29, 30, 0 } } },
{ { { 29, 0, 3 }, { 29, 30, 1 } } },
{ { { 29, 0, 4 }, { 29, 31, 1 } } },
{ { { 30, 0, 3 }, { 29, 31, 0 } } },
{ { { 30, 0, 2 }, { 29, 31, 1 } } },
{ { { 30, 0, 1 }, { 30, 30, 1 } } },
{ { { 30, 0, 0 }, { 30, 30, 0 } } },
{ { { 30, 0, 1 }, { 30, 31, 1 } } },
{ { { 30, 0, 2 }, { 30, 31, 0 } } },
{ { { 30, 0, 3 }, { 30, 31, 1 } } },
{ { { 30, 0, 4 }, { 31, 30, 1 } } },
{ { { 31, 0, 3 }, { 31, 30, 0 } } },
{ { { 31, 0, 2 }, { 31, 30, 1 } } },
{ { { 31, 0, 1 }, { 31, 31, 1 } } },
{ { { 31, 0, 0 }, { 31, 31, 0 } } }
};
static const DDSSingleColourLookup DDSLookup_6_4[] =
{
{ { { 0, 0, 0 }, { 0, 0, 0 } } },
{ { { 0, 0, 1 }, { 0, 1, 0 } } },
{ { { 0, 0, 2 }, { 0, 2, 0 } } },
{ { { 1, 0, 1 }, { 0, 3, 1 } } },
{ { { 1, 0, 0 }, { 0, 3, 0 } } },
{ { { 1, 0, 1 }, { 0, 4, 0 } } },
{ { { 1, 0, 2 }, { 0, 5, 0 } } },
{ { { 2, 0, 1 }, { 0, 6, 1 } } },
{ { { 2, 0, 0 }, { 0, 6, 0 } } },
{ { { 2, 0, 1 }, { 0, 7, 0 } } },
{ { { 2, 0, 2 }, { 0, 8, 0 } } },
{ { { 3, 0, 1 }, { 0, 9, 1 } } },
{ { { 3, 0, 0 }, { 0, 9, 0 } } },
{ { { 3, 0, 1 }, { 0, 10, 0 } } },
{ { { 3, 0, 2 }, { 0, 11, 0 } } },
{ { { 4, 0, 1 }, { 0, 12, 1 } } },
{ { { 4, 0, 0 }, { 0, 12, 0 } } },
{ { { 4, 0, 1 }, { 0, 13, 0 } } },
{ { { 4, 0, 2 }, { 0, 14, 0 } } },
{ { { 5, 0, 1 }, { 0, 15, 1 } } },
{ { { 5, 0, 0 }, { 0, 15, 0 } } },
{ { { 5, 0, 1 }, { 0, 16, 0 } } },
{ { { 5, 0, 2 }, { 1, 15, 0 } } },
{ { { 6, 0, 1 }, { 0, 17, 0 } } },
{ { { 6, 0, 0 }, { 0, 18, 0 } } },
{ { { 6, 0, 1 }, { 0, 19, 0 } } },
{ { { 6, 0, 2 }, { 3, 14, 0 } } },
{ { { 7, 0, 1 }, { 0, 20, 0 } } },
{ { { 7, 0, 0 }, { 0, 21, 0 } } },
{ { { 7, 0, 1 }, { 0, 22, 0 } } },
{ { { 7, 0, 2 }, { 4, 15, 0 } } },
{ { { 8, 0, 1 }, { 0, 23, 0 } } },
{ { { 8, 0, 0 }, { 0, 24, 0 } } },
{ { { 8, 0, 1 }, { 0, 25, 0 } } },
{ { { 8, 0, 2 }, { 6, 14, 0 } } },
{ { { 9, 0, 1 }, { 0, 26, 0 } } },
{ { { 9, 0, 0 }, { 0, 27, 0 } } },
{ { { 9, 0, 1 }, { 0, 28, 0 } } },
{ { { 9, 0, 2 }, { 7, 15, 0 } } },
{ { { 10, 0, 1 }, { 0, 29, 0 } } },
{ { { 10, 0, 0 }, { 0, 30, 0 } } },
{ { { 10, 0, 1 }, { 0, 31, 0 } } },
{ { { 10, 0, 2 }, { 9, 14, 0 } } },
{ { { 11, 0, 1 }, { 0, 32, 0 } } },
{ { { 11, 0, 0 }, { 0, 33, 0 } } },
{ { { 11, 0, 1 }, { 2, 30, 0 } } },
{ { { 11, 0, 2 }, { 0, 34, 0 } } },
{ { { 12, 0, 1 }, { 0, 35, 0 } } },
{ { { 12, 0, 0 }, { 0, 36, 0 } } },
{ { { 12, 0, 1 }, { 3, 31, 0 } } },
{ { { 12, 0, 2 }, { 0, 37, 0 } } },
{ { { 13, 0, 1 }, { 0, 38, 0 } } },
{ { { 13, 0, 0 }, { 0, 39, 0 } } },
{ { { 13, 0, 1 }, { 5, 30, 0 } } },
{ { { 13, 0, 2 }, { 0, 40, 0 } } },
{ { { 14, 0, 1 }, { 0, 41, 0 } } },
{ { { 14, 0, 0 }, { 0, 42, 0 } } },
{ { { 14, 0, 1 }, { 6, 31, 0 } } },
{ { { 14, 0, 2 }, { 0, 43, 0 } } },
{ { { 15, 0, 1 }, { 0, 44, 0 } } },
{ { { 15, 0, 0 }, { 0, 45, 0 } } },
{ { { 15, 0, 1 }, { 8, 30, 0 } } },
{ { { 15, 0, 2 }, { 0, 46, 0 } } },
{ { { 16, 0, 2 }, { 0, 47, 0 } } },
{ { { 16, 0, 1 }, { 1, 46, 0 } } },
{ { { 16, 0, 0 }, { 0, 48, 0 } } },
{ { { 16, 0, 1 }, { 0, 49, 0 } } },
{ { { 16, 0, 2 }, { 0, 50, 0 } } },
{ { { 17, 0, 1 }, { 2, 47, 0 } } },
{ { { 17, 0, 0 }, { 0, 51, 0 } } },
{ { { 17, 0, 1 }, { 0, 52, 0 } } },
{ { { 17, 0, 2 }, { 0, 53, 0 } } },
{ { { 18, 0, 1 }, { 4, 46, 0 } } },
{ { { 18, 0, 0 }, { 0, 54, 0 } } },
{ { { 18, 0, 1 }, { 0, 55, 0 } } },
{ { { 18, 0, 2 }, { 0, 56, 0 } } },
{ { { 19, 0, 1 }, { 5, 47, 0 } } },
{ { { 19, 0, 0 }, { 0, 57, 0 } } },
{ { { 19, 0, 1 }, { 0, 58, 0 } } },
{ { { 19, 0, 2 }, { 0, 59, 0 } } },
{ { { 20, 0, 1 }, { 7, 46, 0 } } },
{ { { 20, 0, 0 }, { 0, 60, 0 } } },
{ { { 20, 0, 1 }, { 0, 61, 0 } } },
{ { { 20, 0, 2 }, { 0, 62, 0 } } },
{ { { 21, 0, 1 }, { 8, 47, 0 } } },
{ { { 21, 0, 0 }, { 0, 63, 0 } } },
{ { { 21, 0, 1 }, { 1, 62, 0 } } },
{ { { 21, 0, 2 }, { 1, 63, 0 } } },
{ { { 22, 0, 1 }, { 10, 46, 0 } } },
{ { { 22, 0, 0 }, { 2, 62, 0 } } },
{ { { 22, 0, 1 }, { 2, 63, 0 } } },
{ { { 22, 0, 2 }, { 3, 62, 0 } } },
{ { { 23, 0, 1 }, { 11, 47, 0 } } },
{ { { 23, 0, 0 }, { 3, 63, 0 } } },
{ { { 23, 0, 1 }, { 4, 62, 0 } } },
{ { { 23, 0, 2 }, { 4, 63, 0 } } },
{ { { 24, 0, 1 }, { 13, 46, 0 } } },
{ { { 24, 0, 0 }, { 5, 62, 0 } } },
{ { { 24, 0, 1 }, { 5, 63, 0 } } },
{ { { 24, 0, 2 }, { 6, 62, 0 } } },
{ { { 25, 0, 1 }, { 14, 47, 0 } } },
{ { { 25, 0, 0 }, { 6, 63, 0 } } },
{ { { 25, 0, 1 }, { 7, 62, 0 } } },
{ { { 25, 0, 2 }, { 7, 63, 0 } } },
{ { { 26, 0, 1 }, { 16, 45, 0 } } },
{ { { 26, 0, 0 }, { 8, 62, 0 } } },
{ { { 26, 0, 1 }, { 8, 63, 0 } } },
{ { { 26, 0, 2 }, { 9, 62, 0 } } },
{ { { 27, 0, 1 }, { 16, 48, 0 } } },
{ { { 27, 0, 0 }, { 9, 63, 0 } } },
{ { { 27, 0, 1 }, { 10, 62, 0 } } },
{ { { 27, 0, 2 }, { 10, 63, 0 } } },
{ { { 28, 0, 1 }, { 16, 51, 0 } } },
{ { { 28, 0, 0 }, { 11, 62, 0 } } },
{ { { 28, 0, 1 }, { 11, 63, 0 } } },
{ { { 28, 0, 2 }, { 12, 62, 0 } } },
{ { { 29, 0, 1 }, { 16, 54, 0 } } },
{ { { 29, 0, 0 }, { 12, 63, 0 } } },
{ { { 29, 0, 1 }, { 13, 62, 0 } } },
{ { { 29, 0, 2 }, { 13, 63, 0 } } },
{ { { 30, 0, 1 }, { 16, 57, 0 } } },
{ { { 30, 0, 0 }, { 14, 62, 0 } } },
{ { { 30, 0, 1 }, { 14, 63, 0 } } },
{ { { 30, 0, 2 }, { 15, 62, 0 } } },
{ { { 31, 0, 1 }, { 16, 60, 0 } } },
{ { { 31, 0, 0 }, { 15, 63, 0 } } },
{ { { 31, 0, 1 }, { 24, 46, 0 } } },
{ { { 31, 0, 2 }, { 16, 62, 0 } } },
{ { { 32, 0, 2 }, { 16, 63, 0 } } },
{ { { 32, 0, 1 }, { 17, 62, 0 } } },
{ { { 32, 0, 0 }, { 25, 47, 0 } } },
{ { { 32, 0, 1 }, { 17, 63, 0 } } },
{ { { 32, 0, 2 }, { 18, 62, 0 } } },
{ { { 33, 0, 1 }, { 18, 63, 0 } } },
{ { { 33, 0, 0 }, { 27, 46, 0 } } },
{ { { 33, 0, 1 }, { 19, 62, 0 } } },
{ { { 33, 0, 2 }, { 19, 63, 0 } } },
{ { { 34, 0, 1 }, { 20, 62, 0 } } },
{ { { 34, 0, 0 }, { 28, 47, 0 } } },
{ { { 34, 0, 1 }, { 20, 63, 0 } } },
{ { { 34, 0, 2 }, { 21, 62, 0 } } },
{ { { 35, 0, 1 }, { 21, 63, 0 } } },
{ { { 35, 0, 0 }, { 30, 46, 0 } } },
{ { { 35, 0, 1 }, { 22, 62, 0 } } },
{ { { 35, 0, 2 }, { 22, 63, 0 } } },
{ { { 36, 0, 1 }, { 23, 62, 0 } } },
{ { { 36, 0, 0 }, { 31, 47, 0 } } },
{ { { 36, 0, 1 }, { 23, 63, 0 } } },
{ { { 36, 0, 2 }, { 24, 62, 0 } } },
{ { { 37, 0, 1 }, { 24, 63, 0 } } },
{ { { 37, 0, 0 }, { 32, 47, 0 } } },
{ { { 37, 0, 1 }, { 25, 62, 0 } } },
{ { { 37, 0, 2 }, { 25, 63, 0 } } },
{ { { 38, 0, 1 }, { 26, 62, 0 } } },
{ { { 38, 0, 0 }, { 32, 50, 0 } } },
{ { { 38, 0, 1 }, { 26, 63, 0 } } },
{ { { 38, 0, 2 }, { 27, 62, 0 } } },
{ { { 39, 0, 1 }, { 27, 63, 0 } } },
{ { { 39, 0, 0 }, { 32, 53, 0 } } },
{ { { 39, 0, 1 }, { 28, 62, 0 } } },
{ { { 39, 0, 2 }, { 28, 63, 0 } } },
{ { { 40, 0, 1 }, { 29, 62, 0 } } },
{ { { 40, 0, 0 }, { 32, 56, 0 } } },
{ { { 40, 0, 1 }, { 29, 63, 0 } } },
{ { { 40, 0, 2 }, { 30, 62, 0 } } },
{ { { 41, 0, 1 }, { 30, 63, 0 } } },
{ { { 41, 0, 0 }, { 32, 59, 0 } } },
{ { { 41, 0, 1 }, { 31, 62, 0 } } },
{ { { 41, 0, 2 }, { 31, 63, 0 } } },
{ { { 42, 0, 1 }, { 32, 61, 0 } } },
{ { { 42, 0, 0 }, { 32, 62, 0 } } },
{ { { 42, 0, 1 }, { 32, 63, 0 } } },
{ { { 42, 0, 2 }, { 41, 46, 0 } } },
{ { { 43, 0, 1 }, { 33, 62, 0 } } },
{ { { 43, 0, 0 }, { 33, 63, 0 } } },
{ { { 43, 0, 1 }, { 34, 62, 0 } } },
{ { { 43, 0, 2 }, { 42, 47, 0 } } },
{ { { 44, 0, 1 }, { 34, 63, 0 } } },
{ { { 44, 0, 0 }, { 35, 62, 0 } } },
{ { { 44, 0, 1 }, { 35, 63, 0 } } },
{ { { 44, 0, 2 }, { 44, 46, 0 } } },
{ { { 45, 0, 1 }, { 36, 62, 0 } } },
{ { { 45, 0, 0 }, { 36, 63, 0 } } },
{ { { 45, 0, 1 }, { 37, 62, 0 } } },
{ { { 45, 0, 2 }, { 45, 47, 0 } } },
{ { { 46, 0, 1 }, { 37, 63, 0 } } },
{ { { 46, 0, 0 }, { 38, 62, 0 } } },
{ { { 46, 0, 1 }, { 38, 63, 0 } } },
{ { { 46, 0, 2 }, { 47, 46, 0 } } },
{ { { 47, 0, 1 }, { 39, 62, 0 } } },
{ { { 47, 0, 0 }, { 39, 63, 0 } } },
{ { { 47, 0, 1 }, { 40, 62, 0 } } },
{ { { 47, 0, 2 }, { 48, 46, 0 } } },
{ { { 48, 0, 2 }, { 40, 63, 0 } } },
{ { { 48, 0, 1 }, { 41, 62, 0 } } },
{ { { 48, 0, 0 }, { 41, 63, 0 } } },
{ { { 48, 0, 1 }, { 48, 49, 0 } } },
{ { { 48, 0, 2 }, { 42, 62, 0 } } },
{ { { 49, 0, 1 }, { 42, 63, 0 } } },
{ { { 49, 0, 0 }, { 43, 62, 0 } } },
{ { { 49, 0, 1 }, { 48, 52, 0 } } },
{ { { 49, 0, 2 }, { 43, 63, 0 } } },
{ { { 50, 0, 1 }, { 44, 62, 0 } } },
{ { { 50, 0, 0 }, { 44, 63, 0 } } },
{ { { 50, 0, 1 }, { 48, 55, 0 } } },
{ { { 50, 0, 2 }, { 45, 62, 0 } } },
{ { { 51, 0, 1 }, { 45, 63, 0 } } },
{ { { 51, 0, 0 }, { 46, 62, 0 } } },
{ { { 51, 0, 1 }, { 48, 58, 0 } } },
{ { { 51, 0, 2 }, { 46, 63, 0 } } },
{ { { 52, 0, 1 }, { 47, 62, 0 } } },
{ { { 52, 0, 0 }, { 47, 63, 0 } } },
{ { { 52, 0, 1 }, { 48, 61, 0 } } },
{ { { 52, 0, 2 }, { 48, 62, 0 } } },
{ { { 53, 0, 1 }, { 56, 47, 0 } } },
{ { { 53, 0, 0 }, { 48, 63, 0 } } },
{ { { 53, 0, 1 }, { 49, 62, 0 } } },
{ { { 53, 0, 2 }, { 49, 63, 0 } } },
{ { { 54, 0, 1 }, { 58, 46, 0 } } },
{ { { 54, 0, 0 }, { 50, 62, 0 } } },
{ { { 54, 0, 1 }, { 50, 63, 0 } } },
{ { { 54, 0, 2 }, { 51, 62, 0 } } },
{ { { 55, 0, 1 }, { 59, 47, 0 } } },
{ { { 55, 0, 0 }, { 51, 63, 0 } } },
{ { { 55, 0, 1 }, { 52, 62, 0 } } },
{ { { 55, 0, 2 }, { 52, 63, 0 } } },
{ { { 56, 0, 1 }, { 61, 46, 0 } } },
{ { { 56, 0, 0 }, { 53, 62, 0 } } },
{ { { 56, 0, 1 }, { 53, 63, 0 } } },
{ { { 56, 0, 2 }, { 54, 62, 0 } } },
{ { { 57, 0, 1 }, { 62, 47, 0 } } },
{ { { 57, 0, 0 }, { 54, 63, 0 } } },
{ { { 57, 0, 1 }, { 55, 62, 0 } } },
{ { { 57, 0, 2 }, { 55, 63, 0 } } },
{ { { 58, 0, 1 }, { 56, 62, 1 } } },
{ { { 58, 0, 0 }, { 56, 62, 0 } } },
{ { { 58, 0, 1 }, { 56, 63, 0 } } },
{ { { 58, 0, 2 }, { 57, 62, 0 } } },
{ { { 59, 0, 1 }, { 57, 63, 1 } } },
{ { { 59, 0, 0 }, { 57, 63, 0 } } },
{ { { 59, 0, 1 }, { 58, 62, 0 } } },
{ { { 59, 0, 2 }, { 58, 63, 0 } } },
{ { { 60, 0, 1 }, { 59, 62, 1 } } },
{ { { 60, 0, 0 }, { 59, 62, 0 } } },
{ { { 60, 0, 1 }, { 59, 63, 0 } } },
{ { { 60, 0, 2 }, { 60, 62, 0 } } },
{ { { 61, 0, 1 }, { 60, 63, 1 } } },
{ { { 61, 0, 0 }, { 60, 63, 0 } } },
{ { { 61, 0, 1 }, { 61, 62, 0 } } },
{ { { 61, 0, 2 }, { 61, 63, 0 } } },
{ { { 62, 0, 1 }, { 62, 62, 1 } } },
{ { { 62, 0, 0 }, { 62, 62, 0 } } },
{ { { 62, 0, 1 }, { 62, 63, 0 } } },
{ { { 62, 0, 2 }, { 63, 62, 0 } } },
{ { { 63, 0, 1 }, { 63, 63, 1 } } },
{ { { 63, 0, 0 }, { 63, 63, 0 } } }
};
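/*
  The tables above appear to follow the single-colour lookup scheme used by
  the squish library: for every 8-bit channel value they record, for each of
  the two DXT interpolation modes, the 5-bit (or 6-bit) endpoint pair whose
  interpolated colour comes closest, together with the remaining error.
  DDS_LOOKUP indexes them per channel (red and blue use the 5-bit table,
  green the 6-bit one).
*/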
static const DDSSingleColourLookup*
DDS_LOOKUP[] =
{
DDSLookup_5_4,
DDSLookup_6_4,
DDSLookup_5_4
};
/*
Macros
*/
#define C565_r(x) (((x) & 0xF800) >> 11)
#define C565_g(x) (((x) & 0x07E0) >> 5)
#define C565_b(x) ((x) & 0x001F)
#define C565_red(x) ( (C565_r(x) << 3 | C565_r(x) >> 2))
#define C565_green(x) ( (C565_g(x) << 2 | C565_g(x) >> 4))
#define C565_blue(x) ( (C565_b(x) << 3 | C565_b(x) >> 2))
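/*
  The shift-and-or pattern above replicates the top bits of each 5- or 6-bit
  channel into the vacated low bits, mapping 0..31 (or 0..63) onto the full
  0..255 range (e.g. 31 -> 0xFF) more evenly than a plain left shift would.
*/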
#define DIV2(x) ((x) > 1 ? ((x) >> 1) : 1)
#define FixRange(min, max, steps) \
do \
{ \
  if (min > max) \
    min = max; \
  if (max - min < steps) \
    max = MagickMin(min + steps, 255); \
  if (max - min < steps) \
    min = MagickMax(min - steps, 0); \
} while(0)
#define Dot(left, right) ((left).x*(right).x + (left).y*(right).y + (left).z*(right).z)
#define VectorInit(vector, value) vector.x = vector.y = vector.z = vector.w \
= value
#define VectorInit3(vector, value) vector.x = vector.y = vector.z = value
#define IsBitMask(mask, r, g, b, a) (mask.r_bitmask == r && mask.g_bitmask == \
g && mask.b_bitmask == b && mask.alpha_bitmask == a)
/*
Forward declarations
*/
static MagickBooleanType
ConstructOrdering(const size_t,const DDSVector4 *,const DDSVector3,
DDSVector4 *,DDSVector4 *,unsigned char *,size_t),
ReadDDSInfo(Image *,DDSInfo *),
ReadDXT1(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT3(Image *,DDSInfo *,ExceptionInfo *),
ReadDXT5(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGB(Image *,DDSInfo *,ExceptionInfo *),
ReadUncompressedRGBA(Image *,DDSInfo *,ExceptionInfo *),
SkipDXTMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
SkipRGBMipmaps(Image *,DDSInfo *,int,ExceptionInfo *),
WriteDDSImage(const ImageInfo *,Image *),
WriteMipmaps(Image *,const size_t,const size_t,const size_t,
const MagickBooleanType,const MagickBooleanType,ExceptionInfo *);
static void
RemapIndices(const ssize_t *,const unsigned char *,unsigned char *),
WriteDDSInfo(Image *,const size_t,const size_t,const size_t),
WriteFourCC(Image *,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteImageData(Image *,const size_t,const size_t,const MagickBooleanType,
const MagickBooleanType,ExceptionInfo *),
WriteIndices(Image *,const DDSVector3,const DDSVector3, unsigned char *),
WriteSingleColorFit(Image *,const DDSVector4 *,const ssize_t *),
WriteUncompressed(Image *,ExceptionInfo *);
static inline void VectorAdd(const DDSVector4 left, const DDSVector4 right,
DDSVector4 *destination)
{
destination->x = left.x + right.x;
destination->y = left.y + right.y;
destination->z = left.z + right.z;
destination->w = left.w + right.w;
}
static inline void VectorClamp(DDSVector4 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
value->w = MagickMin(1.0f,MagickMax(0.0f,value->w));
}
static inline void VectorClamp3(DDSVector3 *value)
{
value->x = MagickMin(1.0f,MagickMax(0.0f,value->x));
value->y = MagickMin(1.0f,MagickMax(0.0f,value->y));
value->z = MagickMin(1.0f,MagickMax(0.0f,value->z));
}
static inline void VectorCopy43(const DDSVector4 source,
DDSVector3 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
}
static inline void VectorCopy44(const DDSVector4 source,
DDSVector4 *destination)
{
destination->x = source.x;
destination->y = source.y;
destination->z = source.z;
destination->w = source.w;
}
static inline void VectorNegativeMultiplySubtract(const DDSVector4 a,
const DDSVector4 b, const DDSVector4 c, DDSVector4 *destination)
{
destination->x = c.x - (a.x * b.x);
destination->y = c.y - (a.y * b.y);
destination->z = c.z - (a.z * b.z);
destination->w = c.w - (a.w * b.w);
}
static inline void VectorMultiply(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
destination->w = left.w * right.w;
}
static inline void VectorMultiply3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x * right.x;
destination->y = left.y * right.y;
destination->z = left.z * right.z;
}
static inline void VectorMultiplyAdd(const DDSVector4 a, const DDSVector4 b,
const DDSVector4 c, DDSVector4 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
destination->w = (a.w * b.w) + c.w;
}
static inline void VectorMultiplyAdd3(const DDSVector3 a, const DDSVector3 b,
const DDSVector3 c, DDSVector3 *destination)
{
destination->x = (a.x * b.x) + c.x;
destination->y = (a.y * b.y) + c.y;
destination->z = (a.z * b.z) + c.z;
}
static inline void VectorReciprocal(const DDSVector4 value,
DDSVector4 *destination)
{
destination->x = 1.0f / value.x;
destination->y = 1.0f / value.y;
destination->z = 1.0f / value.z;
destination->w = 1.0f / value.w;
}
static inline void VectorSubtract(const DDSVector4 left,
const DDSVector4 right, DDSVector4 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
destination->w = left.w - right.w;
}
static inline void VectorSubtract3(const DDSVector3 left,
const DDSVector3 right, DDSVector3 *destination)
{
destination->x = left.x - right.x;
destination->y = left.y - right.y;
destination->z = left.z - right.z;
}
static inline void VectorTruncate(DDSVector4 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
value->w = value->w > 0.0f ? floor(value->w) : ceil(value->w);
}
static inline void VectorTruncate3(DDSVector3 *value)
{
value->x = value->x > 0.0f ? floor(value->x) : ceil(value->x);
value->y = value->y > 0.0f ? floor(value->y) : ceil(value->y);
value->z = value->z > 0.0f ? floor(value->z) : ceil(value->z);
}
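/*
  Decode the two RGB565 endpoints of a DXT colour block and derive the two
  remaining palette entries.  In four-colour mode (c0 > c1, or when alpha is
  ignored) the extra colours sit at 1/3 and 2/3 between the endpoints;
  otherwise colour 2 is the midpoint and colour 3 is transparent black.
*/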
static void CalculateColors(unsigned short c0, unsigned short c1,
DDSColors *c, MagickBooleanType ignoreAlpha)
{
c->a[0] = c->a[1] = c->a[2] = c->a[3] = 0;
c->r[0] = (unsigned char) C565_red(c0);
c->g[0] = (unsigned char) C565_green(c0);
c->b[0] = (unsigned char) C565_blue(c0);
c->r[1] = (unsigned char) C565_red(c1);
c->g[1] = (unsigned char) C565_green(c1);
c->b[1] = (unsigned char) C565_blue(c1);
if (ignoreAlpha != MagickFalse || c0 > c1)
{
c->r[2] = (unsigned char) ((2 * c->r[0] + c->r[1]) / 3);
c->g[2] = (unsigned char) ((2 * c->g[0] + c->g[1]) / 3);
c->b[2] = (unsigned char) ((2 * c->b[0] + c->b[1]) / 3);
c->r[3] = (unsigned char) ((c->r[0] + 2 * c->r[1]) / 3);
c->g[3] = (unsigned char) ((c->g[0] + 2 * c->g[1]) / 3);
c->b[3] = (unsigned char) ((c->b[0] + 2 * c->b[1]) / 3);
}
else
{
c->r[2] = (unsigned char) ((c->r[0] + c->r[1]) / 2);
c->g[2] = (unsigned char) ((c->g[0] + c->g[1]) / 2);
c->b[2] = (unsigned char) ((c->b[0] + c->b[1]) / 2);
c->r[3] = c->g[3] = c->b[3] = 0;
c->a[3] = 255;
}
}
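/*
  Quantize 16 alpha samples against an 8-entry DXT5-style codebook built
  from min/max: with 7 steps all six intermediate codes are interpolated;
  with 5 steps codes 6 and 7 are reserved for 0 and 255.  Returns the
  accumulated squared error; texels mapped to -1 get index 0 and are
  skipped.
*/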
static size_t CompressAlpha(const size_t min, const size_t max,
const size_t steps, const ssize_t *alphas, unsigned char* indices)
{
unsigned char
codes[8];
register ssize_t
i;
size_t
error,
index,
j,
least,
value;
codes[0] = (unsigned char) min;
codes[1] = (unsigned char) max;
codes[6] = 0;
codes[7] = 255;
for (i=1; i < (ssize_t) steps; i++)
codes[i+1] = (unsigned char) (((steps-i)*min + i*max) / steps);
error = 0;
for (i=0; i<16; i++)
{
if (alphas[i] == -1)
{
indices[i] = 0;
continue;
}
value = alphas[i];
least = SIZE_MAX;
index = 0;
for (j=0; j<8; j++)
{
size_t
dist;
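      /*
        The subtraction below may wrap when codes[j] > value, but squaring
        in unsigned arithmetic still yields the correct squared distance,
        since the true square always fits in a size_t.
      */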
dist = value - (size_t)codes[j];
dist *= dist;
if (dist < least)
{
least = dist;
index = j;
}
}
indices[i] = (unsigned char)index;
error += least;
}
return error;
}
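/*
  Cluster fit in the style of the squish library: order the unique colours
  along the principal axis, then exhaustively try every way of splitting the
  ordered list into the four palette clusters, solving a small least-squares
  problem for the endpoints of each split and keeping the best.  The search
  is repeated for up to 8 iterations along the refined axis.
*/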
static void CompressClusterFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
DDSVector3
axis;
DDSVector4
grid,
gridrcp,
half,
onethird_onethird2,
pointsWeights[16],
two,
twonineths,
twothirds_twothirds2,
xSumwSum;
float
bestError = 1e+37f;
size_t
bestIteration = 0,
besti = 0,
bestj = 0,
bestk = 0,
iterationIndex;
ssize_t
i;
unsigned char
*o,
order[128],
unordered[16];
VectorInit(half,0.5f);
VectorInit(two,2.0f);
VectorInit(onethird_onethird2,1.0f/3.0f);
onethird_onethird2.w = 1.0f/9.0f;
VectorInit(twothirds_twothirds2,2.0f/3.0f);
twothirds_twothirds2.w = 4.0f/9.0f;
VectorInit(twonineths,2.0f/9.0f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
grid.w = 0.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
gridrcp.w = 0.0f;
xSumwSum.x = 0.0f;
xSumwSum.y = 0.0f;
xSumwSum.z = 0.0f;
xSumwSum.w = 0.0f;
ConstructOrdering(count,points,principle,pointsWeights,&xSumwSum,order,0);
for (iterationIndex = 0;;)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,1) \
num_threads(GetMagickResourceLimit(ThreadResource))
#endif
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
part0,
part1,
part2;
size_t
ii,
j,
k,
kmin;
VectorInit(part0,0.0f);
for(ii=0; ii < (size_t) i; ii++)
VectorAdd(pointsWeights[ii],part0,&part0);
VectorInit(part1,0.0f);
for (j=(size_t) i;;)
{
if (j == 0)
{
VectorCopy44(pointsWeights[0],&part2);
kmin = 1;
}
else
{
VectorInit(part2,0.0f);
kmin = j;
}
for (k=kmin;;)
{
DDSVector4
a,
alpha2_sum,
alphax_sum,
alphabeta_sum,
b,
beta2_sum,
betax_sum,
e1,
e2,
factor,
part3;
float
error;
VectorSubtract(xSumwSum,part2,&part3);
VectorSubtract(part3,part1,&part3);
VectorSubtract(part3,part0,&part3);
VectorMultiplyAdd(part1,twothirds_twothirds2,part0,&alphax_sum);
VectorMultiplyAdd(part2,onethird_onethird2,alphax_sum,&alphax_sum);
VectorInit(alpha2_sum,alphax_sum.w);
VectorMultiplyAdd(part2,twothirds_twothirds2,part3,&betax_sum);
VectorMultiplyAdd(part1,onethird_onethird2,betax_sum,&betax_sum);
VectorInit(beta2_sum,betax_sum.w);
VectorAdd(part1,part2,&alphabeta_sum);
VectorInit(alphabeta_sum,alphabeta_sum.w);
VectorMultiply(twonineths,alphabeta_sum,&alphabeta_sum);
VectorMultiply(alpha2_sum,beta2_sum,&factor);
VectorNegativeMultiplySubtract(alphabeta_sum,alphabeta_sum,factor,
&factor);
VectorReciprocal(factor,&factor);
VectorMultiply(alphax_sum,beta2_sum,&a);
VectorNegativeMultiplySubtract(betax_sum,alphabeta_sum,a,&a);
VectorMultiply(a,factor,&a);
VectorMultiply(betax_sum,alpha2_sum,&b);
VectorNegativeMultiplySubtract(alphax_sum,alphabeta_sum,b,&b);
VectorMultiply(b,factor,&b);
VectorClamp(&a);
VectorMultiplyAdd(grid,a,half,&a);
VectorTruncate(&a);
VectorMultiply(a,gridrcp,&a);
VectorClamp(&b);
VectorMultiplyAdd(grid,b,half,&b);
VectorTruncate(&b);
VectorMultiply(b,gridrcp,&b);
VectorMultiply(b,b,&e1);
VectorMultiply(e1,beta2_sum,&e1);
VectorMultiply(a,a,&e2);
VectorMultiplyAdd(e2,alpha2_sum,e1,&e1);
VectorMultiply(a,b,&e2);
VectorMultiply(e2,alphabeta_sum,&e2);
VectorNegativeMultiplySubtract(a,alphax_sum,e2,&e2);
VectorNegativeMultiplySubtract(b,betax_sum,e2,&e2);
VectorMultiplyAdd(two,e2,e1,&e2);
VectorMultiply(e2,metric,&e2);
error = e2.x + e2.y + e2.z;
if (error < bestError)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (DDS_CompressClusterFit)
#endif
{
if (error < bestError)
{
VectorCopy43(a,start);
VectorCopy43(b,end);
bestError = error;
besti = i;
bestj = j;
bestk = k;
bestIteration = iterationIndex;
}
}
}
if (k == count)
break;
VectorAdd(pointsWeights[k],part2,&part2);
k++;
}
if (j == count)
break;
VectorAdd(pointsWeights[j],part1,&part1);
j++;
}
}
if (bestIteration != iterationIndex)
break;
iterationIndex++;
if (iterationIndex == 8)
break;
VectorSubtract3(*end,*start,&axis);
if (ConstructOrdering(count,points,axis,pointsWeights,&xSumwSum,order,
iterationIndex) == MagickFalse)
break;
}
o = order + (16*bestIteration);
for (i=0; i < (ssize_t) besti; i++)
unordered[o[i]] = 0;
for (i=besti; i < (ssize_t) bestj; i++)
unordered[o[i]] = 2;
for (i=bestj; i < (ssize_t) bestk; i++)
unordered[o[i]] = 3;
for (i=bestk; i < (ssize_t) count; i++)
unordered[o[i]] = 1;
RemapIndices(map,unordered,indices);
}
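/*
  Range fit: pick the two colours with the extreme projections onto the
  principal axis as endpoints, snap them to RGB565 precision, and assign
  every pixel to the nearest of the four resulting palette entries.
*/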
static void CompressRangeFit(const size_t count,
const DDSVector4 *points, const ssize_t *map, const DDSVector3 principle,
const DDSVector4 metric, DDSVector3 *start, DDSVector3 *end,
unsigned char *indices)
{
float
d,
bestDist,
max,
min,
val;
DDSVector3
codes[4],
grid,
gridrcp,
half,
dist;
register ssize_t
i;
size_t
bestj,
j;
unsigned char
closest[16];
VectorInit3(half,0.5f);
grid.x = 31.0f;
grid.y = 63.0f;
grid.z = 31.0f;
gridrcp.x = 1.0f/31.0f;
gridrcp.y = 1.0f/63.0f;
gridrcp.z = 1.0f/31.0f;
if (count > 0)
{
VectorCopy43(points[0],start);
VectorCopy43(points[0],end);
min = max = Dot(points[0],principle);
for (i=1; i < (ssize_t) count; i++)
{
val = Dot(points[i],principle);
if (val < min)
{
VectorCopy43(points[i],start);
min = val;
}
else if (val > max)
{
VectorCopy43(points[i],end);
max = val;
}
}
}
VectorClamp3(start);
VectorMultiplyAdd3(grid,*start,half,start);
VectorTruncate3(start);
VectorMultiply3(*start,gridrcp,start);
VectorClamp3(end);
VectorMultiplyAdd3(grid,*end,half,end);
VectorTruncate3(end);
VectorMultiply3(*end,gridrcp,end);
codes[0] = *start;
codes[1] = *end;
codes[2].x = (start->x * (2.0f/3.0f)) + (end->x * (1.0f/3.0f));
codes[2].y = (start->y * (2.0f/3.0f)) + (end->y * (1.0f/3.0f));
codes[2].z = (start->z * (2.0f/3.0f)) + (end->z * (1.0f/3.0f));
codes[3].x = (start->x * (1.0f/3.0f)) + (end->x * (2.0f/3.0f));
codes[3].y = (start->y * (1.0f/3.0f)) + (end->y * (2.0f/3.0f));
codes[3].z = (start->z * (1.0f/3.0f)) + (end->z * (2.0f/3.0f));
for (i=0; i < (ssize_t) count; i++)
{
bestDist = 1e+37f;
bestj = 0;
for (j=0; j < 4; j++)
{
dist.x = (points[i].x - codes[j].x) * metric.x;
dist.y = (points[i].y - codes[j].y) * metric.y;
dist.z = (points[i].z - codes[j].z) * metric.z;
d = Dot(dist,dist);
if (d < bestDist)
{
bestDist = d;
bestj = j;
}
}
closest[i] = (unsigned char) bestj;
}
RemapIndices(map, closest, indices);
}
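/*
  Use the per-channel lookup tables to find the 565 endpoint pair that best
  reproduces a single 8-bit colour, trying both interpolation modes (i = 0
  and i = 1) and keeping the one with the smaller summed error.
*/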
static void ComputeEndPoints(const DDSSingleColourLookup *lookup[],
const unsigned char *color, DDSVector3 *start, DDSVector3 *end,
unsigned char *index)
{
register ssize_t
i;
size_t
c,
maxError = SIZE_MAX;
for (i=0; i < 2; i++)
{
const DDSSourceBlock*
sources[3];
size_t
error = 0;
for (c=0; c < 3; c++)
{
sources[c] = &lookup[c][color[c]].sources[i];
error += ((size_t) sources[c]->error) * ((size_t) sources[c]->error);
}
if (error > maxError)
continue;
start->x = (float) sources[0]->start / 31.0f;
start->y = (float) sources[1]->start / 63.0f;
start->z = (float) sources[2]->start / 31.0f;
end->x = (float) sources[0]->end / 31.0f;
end->y = (float) sources[1]->end / 63.0f;
end->z = (float) sources[2]->end / 31.0f;
*index = (unsigned char) (2*i);
maxError = error;
}
}
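/*
  Estimate the dominant eigenvector of the symmetric 3x3 covariance matrix
  with a fixed 8-round power iteration, normalizing by the largest component
  each round.
*/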
static void ComputePrincipleComponent(const float *covariance,
DDSVector3 *principle)
{
DDSVector4
row0,
row1,
row2,
v;
register ssize_t
i;
row0.x = covariance[0];
row0.y = covariance[1];
row0.z = covariance[2];
row0.w = 0.0f;
row1.x = covariance[1];
row1.y = covariance[3];
row1.z = covariance[4];
row1.w = 0.0f;
row2.x = covariance[2];
row2.y = covariance[4];
row2.z = covariance[5];
row2.w = 0.0f;
VectorInit(v,1.0f);
for (i=0; i < 8; i++)
{
DDSVector4
w;
float
a;
w.x = row0.x * v.x;
w.y = row0.y * v.x;
w.z = row0.z * v.x;
w.w = row0.w * v.x;
w.x = (row1.x * v.y) + w.x;
w.y = (row1.y * v.y) + w.y;
w.z = (row1.z * v.y) + w.z;
w.w = (row1.w * v.y) + w.w;
w.x = (row2.x * v.z) + w.x;
w.y = (row2.y * v.z) + w.y;
w.z = (row2.z * v.z) + w.z;
w.w = (row2.w * v.z) + w.w;
a = 1.0f / MagickMax(w.x,MagickMax(w.y,w.z));
v.x = w.x * a;
v.y = w.y * a;
v.z = w.z * a;
v.w = w.w * a;
}
VectorCopy43(v,principle);
}
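/*
  Accumulate the alpha-weighted 3x3 covariance of the colour points about
  their weighted centroid; only the six unique entries of the symmetric
  matrix are stored.
*/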
static void ComputeWeightedCovariance(const size_t count,
const DDSVector4 *points, float *covariance)
{
DDSVector3
centroid;
float
total;
size_t
i;
total = 0.0f;
VectorInit3(centroid,0.0f);
for (i=0; i < count; i++)
{
total += points[i].w;
centroid.x += (points[i].x * points[i].w);
centroid.y += (points[i].y * points[i].w);
centroid.z += (points[i].z * points[i].w);
}
if( total > 1.192092896e-07F)
{
centroid.x /= total;
centroid.y /= total;
centroid.z /= total;
}
for (i=0; i < 6; i++)
covariance[i] = 0.0f;
for (i = 0; i < count; i++)
{
DDSVector3
a,
b;
a.x = points[i].x - centroid.x;
a.y = points[i].y - centroid.y;
a.z = points[i].z - centroid.z;
b.x = points[i].w * a.x;
b.y = points[i].w * a.y;
b.z = points[i].w * a.z;
covariance[0] += a.x*b.x;
covariance[1] += a.x*b.y;
covariance[2] += a.x*b.z;
covariance[3] += a.y*b.y;
covariance[4] += a.y*b.z;
covariance[5] += a.z*b.z;
}
}
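/*
  Sort the colour points by their projection onto the given axis (a simple
  insertion sort over at most 16 entries) and precompute the weighted sums
  used by the cluster fit.  Returns MagickFalse when the new ordering
  duplicates one from an earlier iteration, which lets the caller stop
  early.
*/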
static MagickBooleanType ConstructOrdering(const size_t count,
const DDSVector4 *points, const DDSVector3 axis, DDSVector4 *pointsWeights,
DDSVector4 *xSumwSum, unsigned char *order, size_t iteration)
{
float
dps[16],
f;
register ssize_t
i;
size_t
j;
unsigned char
c,
*o,
*p;
o = order + (16*iteration);
for (i=0; i < (ssize_t) count; i++)
{
dps[i] = Dot(points[i],axis);
o[i] = (unsigned char)i;
}
for (i=0; i < (ssize_t) count; i++)
{
for (j=i; j > 0 && dps[j] < dps[j - 1]; j--)
{
f = dps[j];
dps[j] = dps[j - 1];
dps[j - 1] = f;
c = o[j];
o[j] = o[j - 1];
o[j - 1] = c;
}
}
for (i=0; i < (ssize_t) iteration; i++)
{
MagickBooleanType
same;
p = order + (16*i);
same = MagickTrue;
for (j=0; j < count; j++)
{
if (o[j] != p[j])
{
same = MagickFalse;
break;
}
}
if (same != MagickFalse)
return MagickFalse;
}
xSumwSum->x = 0;
xSumwSum->y = 0;
xSumwSum->z = 0;
xSumwSum->w = 0;
for (i=0; i < (ssize_t) count; i++)
{
DDSVector4
v;
j = (size_t) o[i];
v.x = points[j].w * points[j].x;
v.y = points[j].w * points[j].y;
v.z = points[j].w * points[j].z;
v.w = points[j].w * 1.0f;
VectorCopy44(v,&pointsWeights[i]);
VectorAdd(*xSumwSum,v,xSumwSum);
}
return MagickTrue;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s D D S %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsDDS() returns MagickTrue if the image format type, identified by the
% magick string, is DDS.
%
% The format of the IsDDS method is:
%
% MagickBooleanType IsDDS(const unsigned char *magick,const size_t length)
%
% A description of each parameter follows:
%
% o magick: compare image format pattern against these bytes.
%
% o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsDDS(const unsigned char *magick, const size_t length)
{
if (length < 4)
return(MagickFalse);
if (LocaleNCompare((char *) magick,"DDS ", 4) == 0)
return(MagickTrue);
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadDDSImage() reads a DirectDraw Surface image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadDDSImage method is:
%
% Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: The image info.
%
% o exception: return any errors or warnings in this structure.
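%
% A minimal caller sketch (illustrative only; the filename is
% hypothetical):
%
%   ImageInfo *read_info=CloneImageInfo((ImageInfo *) NULL);
%   (void) CopyMagickString(read_info->filename,"texture.dds",
%     MaxTextExtent);
%   Image *texture=ReadImage(read_info,exception);
%   read_info=DestroyImageInfo(read_info);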
%
*/
static Image *ReadDDSImage(const ImageInfo *image_info,ExceptionInfo *exception)
{
Image
*image;
MagickBooleanType
status,
cubemap = MagickFalse,
volume = MagickFalse,
matte;
CompressionType
compression;
DDSInfo
dds_info;
DDSDecoder
*decoder;
size_t
n,
num_images;
/*
Open image file.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
/*
Initialize image structure.
*/
if (ReadDDSInfo(image, &dds_info) != MagickTrue) {
ThrowReaderException(CorruptImageError,"ImproperImageHeader");
}
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP)
cubemap = MagickTrue;
if (dds_info.ddscaps2 & DDSCAPS2_VOLUME && dds_info.depth > 0)
volume = MagickTrue;
(void) SeekBlob(image, 128, SEEK_SET);
/*
Determine pixel format
*/
if (dds_info.pixelformat.flags & DDPF_RGB)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
matte = MagickTrue;
decoder = ReadUncompressedRGBA;
}
else
{
      matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_LUMINANCE)
{
compression = NoCompression;
if (dds_info.pixelformat.flags & DDPF_ALPHAPIXELS)
{
/* Not sure how to handle this */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
else
{
matte = MagickFalse;
decoder = ReadUncompressedRGB;
}
}
else if (dds_info.pixelformat.flags & DDPF_FOURCC)
{
switch (dds_info.pixelformat.fourcc)
{
case FOURCC_DXT1:
{
matte = MagickFalse;
compression = DXT1Compression;
decoder = ReadDXT1;
break;
}
case FOURCC_DXT3:
{
matte = MagickTrue;
compression = DXT3Compression;
decoder = ReadDXT3;
break;
}
case FOURCC_DXT5:
{
matte = MagickTrue;
compression = DXT5Compression;
decoder = ReadDXT5;
break;
}
default:
{
/* Unknown FOURCC */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
}
}
else
{
/* Neither compressed nor uncompressed... thus unsupported */
ThrowReaderException(CorruptImageError, "ImageTypeNotSupported");
}
num_images = 1;
if (cubemap)
{
/*
Determine number of faces defined in the cubemap
*/
num_images = 0;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEX) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEY) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_POSITIVEZ) num_images++;
if (dds_info.ddscaps2 & DDSCAPS2_CUBEMAP_NEGATIVEZ) num_images++;
}
if (volume)
num_images = dds_info.depth;
for (n = 0; n < num_images; n++)
{
if (n != 0)
{
if (EOFBlob(image) != MagickFalse)
ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile");
/* Start a new image */
AcquireNextImage(image_info,image);
if (GetNextImageInList(image) == (Image *) NULL)
return(DestroyImageList(image));
image=SyncNextImageInList(image);
}
image->matte = matte;
image->compression = compression;
image->columns = dds_info.width;
image->rows = dds_info.height;
image->storage_class = DirectClass;
image->endian = LSBEndian;
image->depth = 8;
if (image_info->ping != MagickFalse)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
status=SetImageExtent(image,image->columns,image->rows);
if (status == MagickFalse)
{
InheritException(exception,&image->exception);
return(DestroyImageList(image));
}
if ((decoder)(image, &dds_info, exception) != MagickTrue)
{
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
}
if (EOFBlob(image) != MagickFalse)
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
(void) CloseBlob(image);
return(GetFirstImageInList(image));
}
static MagickBooleanType ReadDDSInfo(Image *image, DDSInfo *dds_info)
{
size_t
hdr_size,
required;
/* Seek to start of header */
(void) SeekBlob(image, 4, SEEK_SET);
/* Check header field */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 124)
return MagickFalse;
/* Fill in DDS info struct */
dds_info->flags = ReadBlobLSBLong(image);
/* Check required flags */
required=(size_t) (DDSD_WIDTH | DDSD_HEIGHT | DDSD_PIXELFORMAT);
if ((dds_info->flags & required) != required)
return MagickFalse;
dds_info->height = ReadBlobLSBLong(image);
dds_info->width = ReadBlobLSBLong(image);
dds_info->pitchOrLinearSize = ReadBlobLSBLong(image);
dds_info->depth = ReadBlobLSBLong(image);
dds_info->mipmapcount = ReadBlobLSBLong(image);
(void) SeekBlob(image, 44, SEEK_CUR); /* reserved region of 11 DWORDs */
/* Read pixel format structure */
hdr_size = ReadBlobLSBLong(image);
if (hdr_size != 32)
return MagickFalse;
dds_info->pixelformat.flags = ReadBlobLSBLong(image);
dds_info->pixelformat.fourcc = ReadBlobLSBLong(image);
dds_info->pixelformat.rgb_bitcount = ReadBlobLSBLong(image);
dds_info->pixelformat.r_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.g_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.b_bitmask = ReadBlobLSBLong(image);
dds_info->pixelformat.alpha_bitmask = ReadBlobLSBLong(image);
dds_info->ddscaps1 = ReadBlobLSBLong(image);
dds_info->ddscaps2 = ReadBlobLSBLong(image);
(void) SeekBlob(image, 12, SEEK_CUR); /* 3 reserved DWORDs */
return MagickTrue;
}
static MagickBooleanType ReadDXT1(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
PixelPacket
*q;
register ssize_t
i,
x;
size_t
bits;
ssize_t
j,
y;
unsigned char
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickFalse);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (unsigned char) ((bits >> ((j*4+i)*2)) & 0x3);
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
SetPixelOpacity(q,ScaleCharToQuantum(colors.a[code]));
if (colors.a[code] && image->matte == MagickFalse)
/* Correct matte */
image->matte = MagickTrue;
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,8,exception));
}
static MagickBooleanType ReadDXT3(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
alpha;
size_t
a0,
a1,
bits,
code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = ReadBlobLSBLong(image);
a1 = ReadBlobLSBLong(image);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/*
Extract alpha value: multiply 0..15 by 17 to get range 0..255
*/
if (j < 2)
alpha = 17U * (unsigned char) ((a0 >> (4*(4*j+i))) & 0xf);
else
alpha = 17U * (unsigned char) ((a1 >> (4*(4*(j-2)+i))) & 0xf);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadDXT5(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
DDSColors
colors;
ssize_t
j,
y;
MagickSizeType
alpha_bits;
PixelPacket
*q;
register ssize_t
i,
x;
unsigned char
a0,
a1;
size_t
alpha,
bits,
code,
alpha_code;
unsigned short
c0,
c1;
for (y = 0; y < (ssize_t) dds_info->height; y += 4)
{
for (x = 0; x < (ssize_t) dds_info->width; x += 4)
{
/* Get 4x4 patch of pixels to write on */
q = QueueAuthenticPixels(image, x, y, MagickMin(4, dds_info->width - x),
MagickMin(4, dds_info->height - y),exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
/* Read alpha values (8 bytes) */
a0 = (unsigned char) ReadBlobByte(image);
a1 = (unsigned char) ReadBlobByte(image);
alpha_bits = (MagickSizeType)ReadBlobLSBLong(image);
alpha_bits = alpha_bits | ((MagickSizeType)ReadBlobLSBShort(image) << 32);
/* Read 8 bytes of data from the image */
c0 = ReadBlobLSBShort(image);
c1 = ReadBlobLSBShort(image);
bits = ReadBlobLSBLong(image);
CalculateColors(c0, c1, &colors, MagickTrue);
/* Write the pixels */
for (j = 0; j < 4; j++)
{
for (i = 0; i < 4; i++)
{
if ((x + i) < (ssize_t) dds_info->width && (y + j) < (ssize_t) dds_info->height)
{
code = (bits >> ((4*j+i)*2)) & 0x3;
SetPixelRed(q,ScaleCharToQuantum(colors.r[code]));
SetPixelGreen(q,ScaleCharToQuantum(colors.g[code]));
SetPixelBlue(q,ScaleCharToQuantum(colors.b[code]));
/* Extract alpha value */
alpha_code = (size_t) (alpha_bits >> (3*(4*j+i))) & 0x7;
if (alpha_code == 0)
alpha = a0;
else if (alpha_code == 1)
alpha = a1;
else if (a0 > a1)
alpha = ((8-alpha_code) * a0 + (alpha_code-1) * a1) / 7;
else if (alpha_code == 6)
alpha = 0;
else if (alpha_code == 7)
alpha = 255;
else
alpha = (((6-alpha_code) * a0 + (alpha_code-1) * a1) / 5);
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
alpha));
q++;
}
}
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
}
return(SkipDXTMipmaps(image,dds_info,16,exception));
}
static MagickBooleanType ReadUncompressedRGB(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
x, y;
unsigned short
color;
if (dds_info->pixelformat.rgb_bitcount == 8)
(void) SetImageType(image,GrayscaleType);
else if (dds_info->pixelformat.rgb_bitcount == 16 && !IsBitMask(
dds_info->pixelformat,0xf800,0x07e0,0x001f,0x0000))
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 8)
        SetPixelGray(q,ScaleCharToQuantum((unsigned char) ReadBlobByte(image)));
else if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
(((color >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 5) >> 10)/63.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
if (dds_info->pixelformat.rgb_bitcount == 32)
(void) ReadBlobByte(image);
}
SetPixelAlpha(q,QuantumRange);
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,3,exception));
}
static MagickBooleanType ReadUncompressedRGBA(Image *image, DDSInfo *dds_info,
ExceptionInfo *exception)
{
PixelPacket
*q;
ssize_t
alphaBits,
x,
y;
unsigned short
color;
alphaBits=0;
if (dds_info->pixelformat.rgb_bitcount == 16)
{
if (IsBitMask(dds_info->pixelformat,0x7c00,0x03e0,0x001f,0x8000))
alphaBits=1;
else if (IsBitMask(dds_info->pixelformat,0x00ff,0x00ff,0x00ff,0xff00))
{
alphaBits=2;
(void) SetImageType(image,GrayscaleMatteType);
}
else if (IsBitMask(dds_info->pixelformat,0x0f00,0x00f0,0x000f,0xf000))
alphaBits=4;
else
ThrowBinaryException(CorruptImageError,"ImageTypeNotSupported",
image->filename);
}
for (y = 0; y < (ssize_t) dds_info->height; y++)
{
q = QueueAuthenticPixels(image, 0, y, dds_info->width, 1,exception);
if (q == (PixelPacket *) NULL)
return MagickFalse;
for (x = 0; x < (ssize_t) dds_info->width; x++)
{
if (dds_info->pixelformat.rgb_bitcount == 16)
{
color=ReadBlobShort(image);
if (alphaBits == 1)
{
SetPixelAlpha(q,(color & (1 << 15)) ? QuantumRange : 0);
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 1) >> 11)/31.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 6) >> 11)/31.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 11) >> 11)/31.0)*255)));
}
else if (alphaBits == 2)
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(color >> 8)));
SetPixelGray(q,ScaleCharToQuantum((unsigned char)color));
}
else
{
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
(((color >> 12)/15.0)*255)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 4) >> 12)/15.0)*255)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 8) >> 12)/15.0)*255)));
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
((((unsigned short)(color << 12) >> 12)/15.0)*255)));
}
}
else
{
SetPixelBlue(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelGreen(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelRed(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
SetPixelAlpha(q,ScaleCharToQuantum((unsigned char)
ReadBlobByte(image)));
}
q++;
}
if (SyncAuthenticPixels(image,exception) == MagickFalse)
return MagickFalse;
}
return(SkipRGBMipmaps(image,dds_info,4,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterDDSImage() adds attributes for the DDS image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterDDSImage method is:
%
% RegisterDDSImage(void)
%
*/
ModuleExport size_t RegisterDDSImage(void)
{
MagickInfo
*entry;
entry = SetMagickInfo("DDS");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT1");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
entry = SetMagickInfo("DXT5");
entry->decoder = (DecodeImageHandler *) ReadDDSImage;
entry->encoder = (EncodeImageHandler *) WriteDDSImage;
entry->magick = (IsImageFormatHandler *) IsDDS;
entry->seekable_stream=MagickTrue;
entry->description = ConstantString("Microsoft DirectDraw Surface");
entry->module = ConstantString("DDS");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
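/*
  Scatter the per-unique-colour indices back onto the 16 texel positions;
  positions with no mapping (map[i] == -1, i.e. transparent texels) get
  index 3, the transparent code of the DXT1 three-colour mode.
*/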
static void RemapIndices(const ssize_t *map, const unsigned char *source,
unsigned char *target)
{
register ssize_t
i;
for (i = 0; i < 16; i++)
{
if (map[i] == -1)
target[i] = 3;
else
target[i] = source[map[i]];
}
}
/*
Skip the mipmap images for compressed (DXTn) dds files
*/
static MagickBooleanType SkipDXTMipmaps(Image *image,DDSInfo *dds_info,
int texel_size,ExceptionInfo *exception)
{
register ssize_t
i;
MagickOffsetType
offset;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i = 1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) ((w + 3) / 4) * ((h + 3) / 4) * texel_size;
if (SeekBlob(image,offset,SEEK_CUR) < 0)
break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
Skip the mipmap images for uncompressed (RGB or RGBA) dds files
*/
static MagickBooleanType SkipRGBMipmaps(Image *image,DDSInfo *dds_info,
int pixel_size,ExceptionInfo *exception)
{
MagickOffsetType
offset;
register ssize_t
i;
size_t
h,
w;
/*
Only skip mipmaps for textures and cube maps
*/
if (EOFBlob(image) != MagickFalse)
{
ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile",
image->filename);
return(MagickFalse);
}
if (dds_info->ddscaps1 & DDSCAPS_MIPMAP
&& (dds_info->ddscaps1 & DDSCAPS_TEXTURE
|| dds_info->ddscaps2 & DDSCAPS2_CUBEMAP))
{
w = DIV2(dds_info->width);
h = DIV2(dds_info->height);
/*
Mipmapcount includes the main image, so start from one
*/
for (i=1; (i < (ssize_t) dds_info->mipmapcount) && w && h; i++)
{
offset = (MagickOffsetType) w * h * pixel_size;
      if (SeekBlob(image, offset, SEEK_CUR) < 0)
        break;
w = DIV2(w);
h = DIV2(h);
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterDDSImage() removes format registrations made by the
% DDS module from the list of supported formats.
%
% The format of the UnregisterDDSImage method is:
%
% UnregisterDDSImage(void)
%
*/
ModuleExport void UnregisterDDSImage(void)
{
(void) UnregisterMagickInfo("DDS");
(void) UnregisterMagickInfo("DXT1");
(void) UnregisterMagickInfo("DXT5");
}
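/*
  Compress the block's alphas with both the six-interpolant (a0 > a1) and
  the four-interpolant-plus-0/255 (a0 <= a1) DXT5 codebooks and emit
  whichever has the smaller error.  When the 7-step result wins, the
  endpoints are swapped and the indices remapped so the written block
  selects the eight-alpha decode mode.
*/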
static void WriteAlphas(Image *image, const ssize_t* alphas, size_t min5,
size_t max5, size_t min7, size_t max7)
{
register ssize_t
i;
size_t
err5,
err7,
j;
unsigned char
indices5[16],
indices7[16];
FixRange(min5,max5,5);
err5 = CompressAlpha(min5,max5,5,alphas,indices5);
FixRange(min7,max7,7);
err7 = CompressAlpha(min7,max7,7,alphas,indices7);
if (err7 < err5)
{
for (i=0; i < 16; i++)
{
unsigned char
index;
index = indices7[i];
if( index == 0 )
indices5[i] = 1;
else if (index == 1)
indices5[i] = 0;
else
indices5[i] = 9 - index;
}
min5 = max7;
max5 = min7;
}
(void) WriteBlobByte(image,(unsigned char) min5);
(void) WriteBlobByte(image,(unsigned char) max5);
for(i=0; i < 2; i++)
{
size_t
value = 0;
for (j=0; j < 8; j++)
{
size_t index = (size_t) indices5[j + i*8];
value |= ( index << 3*j );
}
for (j=0; j < 3; j++)
{
size_t byte = (value >> 8*j) & 0xff;
(void) WriteBlobByte(image,(unsigned char) byte);
}
}
}
static void WriteCompressed(Image *image, const size_t count,
DDSVector4* points, const ssize_t* map, const MagickBooleanType clusterFit)
{
float
covariance[16];
DDSVector3
end,
principle,
start;
DDSVector4
metric;
unsigned char
indices[16];
VectorInit(metric,1.0f);
VectorInit3(start,0.0f);
VectorInit3(end,0.0f);
ComputeWeightedCovariance(count,points,covariance);
ComputePrincipleComponent(covariance,&principle);
if (clusterFit == MagickFalse || count == 0)
CompressRangeFit(count,points,map,principle,metric,&start,&end,indices);
else
CompressClusterFit(count,points,map,principle,metric,&start,&end,indices);
WriteIndices(image,start,end,indices);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W r i t e D D S I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WriteDDSImage() writes a DirectDraw Surface image file in the DXT1,
% DXT5, or uncompressed RGB(A) format, depending on the image matte
% channel and the "dds:compression" option.
%
% The format of the WriteDDSImage method is:
%
% MagickBooleanType WriteDDSImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: The image.
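%
% The coder honors the "dds:compression", "dds:cluster-fit",
% "dds:weight-by-alpha" and "dds:mipmaps" image options. A caller sketch
% (illustrative only):
%
%   ImageInfo *write_info=CloneImageInfo(image_info);
%   (void) SetImageOption(write_info,"dds:compression","dxt1");
%   (void) WriteImage(write_info,image);
%   write_info=DestroyImageInfo(write_info);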
%
*/
static MagickBooleanType WriteDDSImage(const ImageInfo *image_info,
Image *image)
{
const char
*option;
size_t
compression,
columns,
maxMipmaps,
mipmaps,
pixelFormat,
rows;
MagickBooleanType
clusterFit,
status,
weightByAlpha;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
if (status == MagickFalse)
return(status);
(void) TransformImageColorspace(image,sRGBColorspace);
pixelFormat=DDPF_FOURCC;
compression=FOURCC_DXT5;
if (!image->matte)
compression=FOURCC_DXT1;
if (LocaleCompare(image_info->magick,"dxt1") == 0)
compression=FOURCC_DXT1;
option=GetImageOption(image_info,"dds:compression");
if (option != (char *) NULL)
{
if (LocaleCompare(option,"dxt1") == 0)
compression=FOURCC_DXT1;
if (LocaleCompare(option,"none") == 0)
pixelFormat=DDPF_RGB;
}
clusterFit=MagickFalse;
weightByAlpha=MagickFalse;
if (pixelFormat == DDPF_FOURCC)
{
option=GetImageOption(image_info,"dds:cluster-fit");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
{
clusterFit=MagickTrue;
if (compression != FOURCC_DXT1)
{
option=GetImageOption(image_info,"dds:weight-by-alpha");
if (option != (char *) NULL && LocaleCompare(option,"true") == 0)
weightByAlpha=MagickTrue;
}
}
}
maxMipmaps=SIZE_MAX;
mipmaps=0;
if ((image->columns & (image->columns - 1)) == 0 &&
(image->rows & (image->rows - 1)) == 0)
{
option=GetImageOption(image_info,"dds:mipmaps");
if (option != (char *) NULL)
maxMipmaps=StringToUnsignedLong(option);
if (maxMipmaps != 0)
{
columns=image->columns;
rows=image->rows;
while (columns != 1 && rows != 1 && mipmaps != maxMipmaps)
{
columns=DIV2(columns);
rows=DIV2(rows);
mipmaps++;
}
}
}
WriteDDSInfo(image,pixelFormat,compression,mipmaps);
WriteImageData(image,pixelFormat,compression,clusterFit,weightByAlpha,
&image->exception);
if (mipmaps > 0 && WriteMipmaps(image,pixelFormat,compression,mipmaps,
clusterFit,weightByAlpha,&image->exception) == MagickFalse)
return(MagickFalse);
(void) CloseBlob(image);
return(MagickTrue);
}
static void WriteDDSInfo(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps)
{
char
software[MaxTextExtent];
register ssize_t
i;
unsigned int
format,
caps,
flags;
flags=(unsigned int) (DDSD_CAPS | DDSD_WIDTH | DDSD_HEIGHT |
DDSD_PIXELFORMAT | DDSD_LINEARSIZE);
caps=(unsigned int) DDSCAPS_TEXTURE;
format=(unsigned int) pixelFormat;
if (mipmaps > 0)
{
flags=flags | (unsigned int) DDSD_MIPMAPCOUNT;
caps=caps | (unsigned int) (DDSCAPS_MIPMAP | DDSCAPS_COMPLEX);
}
if (format != DDPF_FOURCC && image->matte)
format=format | DDPF_ALPHAPIXELS;
(void) WriteBlob(image,4,(unsigned char *) "DDS ");
(void) WriteBlobLSBLong(image,124);
(void) WriteBlobLSBLong(image,flags);
(void) WriteBlobLSBLong(image,(unsigned int) image->rows);
(void) WriteBlobLSBLong(image,(unsigned int) image->columns);
if (compression == FOURCC_DXT1)
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 8));
else
(void) WriteBlobLSBLong(image,
(unsigned int) (MagickMax(1,(image->columns+3)/4) * 16));
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,(unsigned int) mipmaps+1);
(void) ResetMagickMemory(software,0,sizeof(software));
  (void) CopyMagickString(software,"IMAGEMAGICK",MaxTextExtent);
(void) WriteBlob(image,44,(unsigned char *) software);
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,format);
if (pixelFormat == DDPF_FOURCC)
{
(void) WriteBlobLSBLong(image,(unsigned int) compression);
      for (i=0; i < 5; i++)  /* bitcount / masks */
(void) WriteBlobLSBLong(image,0x00);
}
else
{
(void) WriteBlobLSBLong(image,0x00);
if (image->matte)
{
(void) WriteBlobLSBLong(image,32);
(void) WriteBlobLSBLong(image,0xff0000);
(void) WriteBlobLSBLong(image,0xff00);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0xff000000);
}
else
{
(void) WriteBlobLSBLong(image,24);
(void) WriteBlobLSBLong(image,0xff);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
(void) WriteBlobLSBLong(image,0x00);
}
}
(void) WriteBlobLSBLong(image,caps);
  for (i=0; i < 4; i++)  /* ddscaps2 + reserved region */
(void) WriteBlobLSBLong(image,0x00);
}
static void WriteFourCC(Image *image, const size_t compression,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
i,
y,
bx,
by;
for (y=0; y < (ssize_t) image->rows; y+=4)
{
for (x=0; x < (ssize_t) image->columns; x+=4)
{
MagickBooleanType
match;
DDSVector4
point,
points[16];
size_t
count = 0,
max5 = 0,
max7 = 0,
min5 = 255,
min7 = 255,
columns = 4,
rows = 4;
ssize_t
alphas[16],
map[16];
unsigned char
alpha;
if (x + columns >= image->columns)
columns = image->columns - x;
if (y + rows >= image->rows)
rows = image->rows - y;
p=GetVirtualPixels(image,x,y,columns,rows,exception);
if (p == (const PixelPacket *) NULL)
break;
for (i=0; i<16; i++)
{
map[i] = -1;
alphas[i] = -1;
}
for (by=0; by < (ssize_t) rows; by++)
{
for (bx=0; bx < (ssize_t) columns; bx++)
{
if (compression == FOURCC_DXT5)
alpha = ScaleQuantumToChar(GetPixelAlpha(p));
else
alpha = 255;
alphas[4*by + bx] = (size_t)alpha;
point.x = (float)ScaleQuantumToChar(GetPixelRed(p)) / 255.0f;
point.y = (float)ScaleQuantumToChar(GetPixelGreen(p)) / 255.0f;
point.z = (float)ScaleQuantumToChar(GetPixelBlue(p)) / 255.0f;
point.w = weightByAlpha ? (float)(alpha + 1) / 256.0f : 1.0f;
p++;
match = MagickFalse;
for (i=0; i < (ssize_t) count; i++)
{
if ((points[i].x == point.x) &&
(points[i].y == point.y) &&
(points[i].z == point.z) &&
(alpha >= 128 || compression == FOURCC_DXT5))
{
points[i].w += point.w;
map[4*by + bx] = i;
match = MagickTrue;
break;
}
}
if (match != MagickFalse)
continue;
points[count].x = point.x;
points[count].y = point.y;
points[count].z = point.z;
points[count].w = point.w;
map[4*by + bx] = count;
count++;
if (compression == FOURCC_DXT5)
{
if (alpha < min7)
min7 = alpha;
if (alpha > max7)
max7 = alpha;
if (alpha != 0 && alpha < min5)
min5 = alpha;
if (alpha != 255 && alpha > max5)
max5 = alpha;
}
}
}
for (i=0; i < (ssize_t) count; i++)
points[i].w = sqrt(points[i].w);
if (compression == FOURCC_DXT5)
WriteAlphas(image,alphas,min5,max5,min7,max7);
if (count == 1)
WriteSingleColorFit(image,points,map);
else
WriteCompressed(image,count,points,map,clusterFit);
}
}
}
static void WriteImageData(Image *image, const size_t pixelFormat,
const size_t compression, const MagickBooleanType clusterFit,
const MagickBooleanType weightByAlpha, ExceptionInfo *exception)
{
if (pixelFormat == DDPF_FOURCC)
WriteFourCC(image,compression,clusterFit,weightByAlpha,exception);
else
WriteUncompressed(image,exception);
}
static inline size_t ClampToLimit(const float value,
  const size_t limit)
{
  ssize_t
    result = (ssize_t) (value + 0.5f);
  if (result < 0)
    return(0);
  if (result > (ssize_t) limit)
    return(limit);
  return((size_t) result);
}
static inline size_t ColorTo565(const DDSVector3 point)
{
size_t r = ClampToLimit(31.0f*point.x,31);
size_t g = ClampToLimit(63.0f*point.y,63);
size_t b = ClampToLimit(31.0f*point.z,31);
return (r << 11) | (g << 5) | b;
}
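/*
  Emit a DXT1 colour block: two RGB565 endpoints followed by 16 2-bit
  indices packed four per byte.  The larger endpoint is written first to
  select four-colour mode; when that requires swapping, the indices are
  flipped (0<->1, 2<->3) to compensate, and identical endpoints force every
  index to 0.
*/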
static void WriteIndices(Image *image, const DDSVector3 start,
const DDSVector3 end, unsigned char* indices)
{
register ssize_t
i;
size_t
a,
b;
unsigned char
remapped[16];
const unsigned char
*ind;
a = ColorTo565(start);
b = ColorTo565(end);
for (i=0; i<16; i++)
{
if( a < b )
remapped[i] = (indices[i] ^ 0x1) & 0x3;
else if( a == b )
remapped[i] = 0;
else
remapped[i] = indices[i];
}
if( a < b )
Swap(a,b);
(void) WriteBlobByte(image,(unsigned char) (a & 0xff));
(void) WriteBlobByte(image,(unsigned char) (a >> 8));
(void) WriteBlobByte(image,(unsigned char) (b & 0xff));
(void) WriteBlobByte(image,(unsigned char) (b >> 8));
for (i=0; i<4; i++)
{
ind = remapped + 4*i;
(void) WriteBlobByte(image,ind[0] | (ind[1] << 2) | (ind[2] << 4) |
(ind[3] << 6));
}
}
static MagickBooleanType WriteMipmaps(Image *image, const size_t pixelFormat,
const size_t compression, const size_t mipmaps,
const MagickBooleanType clusterFit, const MagickBooleanType weightByAlpha,
ExceptionInfo *exception)
{
Image*
resize_image;
register ssize_t
i;
size_t
columns,
rows;
columns = image->columns;
rows = image->rows;
for (i=0; i< (ssize_t) mipmaps; i++)
{
resize_image = ResizeImage(image,columns/2,rows/2,TriangleFilter,1.0,
exception);
if (resize_image == (Image *) NULL)
return(MagickFalse);
DestroyBlob(resize_image);
resize_image->blob=ReferenceBlob(image->blob);
    WriteImageData(resize_image,pixelFormat,compression,clusterFit,
      weightByAlpha,exception);
resize_image=DestroyImage(resize_image);
columns = DIV2(columns);
rows = DIV2(rows);
}
return(MagickTrue);
}
static void WriteSingleColorFit(Image *image, const DDSVector4* points,
const ssize_t* map)
{
DDSVector3
start,
end;
register ssize_t
i;
unsigned char
color[3],
index,
indexes[16],
indices[16];
color[0] = (unsigned char) ClampToLimit(255.0f*points->x,255);
color[1] = (unsigned char) ClampToLimit(255.0f*points->y,255);
color[2] = (unsigned char) ClampToLimit(255.0f*points->z,255);
index=0;
ComputeEndPoints(DDS_LOOKUP,color,&start,&end,&index);
for (i=0; i< 16; i++)
indexes[i]=index;
RemapIndices(map,indexes,indices);
WriteIndices(image,start,end,indices);
}
static void WriteUncompressed(Image *image, ExceptionInfo *exception)
{
register const PixelPacket
*p;
register ssize_t
x;
ssize_t
y;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelBlue(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelGreen(p)));
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelRed(p)));
if (image->matte)
(void) WriteBlobByte(image,ScaleQuantumToChar(GetPixelAlpha(p)));
p++;
}
}
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_1855_0 |
crossvul-cpp_data_good_5561_2 | /*
* Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <crm_internal.h>
#include <sys/param.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>
#include <stdlib.h>
#include <errno.h>
#include <fcntl.h>
#include <time.h>
#include <crm/crm.h>
#include <crm/cib/internal.h>
#include <crm/msg_xml.h>
#include <crm/common/xml.h>
#include <cibio.h>
#include <callbacks.h>
#include <notify.h>
int pending_updates = 0;
extern GHashTable *client_list;
gboolean cib_notify_client(gpointer key, gpointer value, gpointer user_data);
void attach_cib_generation(xmlNode * msg, const char *field, xmlNode * a_cib);
void do_cib_notify(int options, const char *op, xmlNode * update,
int result, xmlNode * result_data, const char *msg_type);
static void
need_pre_notify(gpointer key, gpointer value, gpointer user_data)
{
cib_client_t *client = value;
if (client->pre_notify) {
gboolean *needed = user_data;
*needed = TRUE;
}
}
static void
need_post_notify(gpointer key, gpointer value, gpointer user_data)
{
cib_client_t *client = value;
if (client->post_notify) {
gboolean *needed = user_data;
*needed = TRUE;
}
}
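/*
* Invoked via g_hash_table_foreach_remove(): returning TRUE removes the
* entry from client_list, so only NULL clients are purged and every real
* client is kept (FALSE) whether or not a message was sent.
*/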
gboolean
cib_notify_client(gpointer key, gpointer value, gpointer user_data)
{
const char *type = NULL;
gboolean do_send = FALSE;
cib_client_t *client = value;
xmlNode *update_msg = user_data;
CRM_CHECK(client != NULL, return TRUE);
CRM_CHECK(update_msg != NULL, return TRUE);
if (client->ipc == NULL && client->session == NULL) {
crm_warn("Skipping client with NULL channel");
return FALSE;
}
type = crm_element_value(update_msg, F_SUBTYPE);
CRM_LOG_ASSERT(type != NULL);
if (client->diffs && safe_str_eq(type, T_CIB_DIFF_NOTIFY)) {
do_send = TRUE;
} else if (client->replace && safe_str_eq(type, T_CIB_REPLACE_NOTIFY)) {
do_send = TRUE;
} else if (client->confirmations && safe_str_eq(type, T_CIB_UPDATE_CONFIRM)) {
do_send = TRUE;
} else if (client->pre_notify && safe_str_eq(type, T_CIB_PRE_NOTIFY)) {
do_send = TRUE;
} else if (client->post_notify && safe_str_eq(type, T_CIB_POST_NOTIFY)) {
do_send = TRUE;
}
if (do_send) {
if (client->ipc) {
if(crm_ipcs_send(client->ipc, 0, update_msg, TRUE) == FALSE) {
crm_warn("Notification of client %s/%s failed", client->name, client->id);
}
#ifdef HAVE_GNUTLS_GNUTLS_H
} else if (client->session) {
crm_debug("Sent %s notification to client %s/%s", type, client->name, client->id);
crm_send_remote_msg(client->session, update_msg, client->encrypted);
#endif
} else {
crm_err("Unknown transport for %s", client->name);
}
}
return FALSE;
}
void
cib_pre_notify(int options, const char *op, xmlNode * existing, xmlNode * update)
{
xmlNode *update_msg = NULL;
const char *type = NULL;
const char *id = NULL;
gboolean needed = FALSE;
g_hash_table_foreach(client_list, need_pre_notify, &needed);
if (needed == FALSE) {
return;
}
/* TODO: consider pre-notification for removal */
update_msg = create_xml_node(NULL, "pre-notify");
if (update != NULL) {
id = crm_element_value(update, XML_ATTR_ID);
}
crm_xml_add(update_msg, F_TYPE, T_CIB_NOTIFY);
crm_xml_add(update_msg, F_SUBTYPE, T_CIB_PRE_NOTIFY);
crm_xml_add(update_msg, F_CIB_OPERATION, op);
if (id != NULL) {
crm_xml_add(update_msg, F_CIB_OBJID, id);
}
if (update != NULL) {
crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(update));
} else if (existing != NULL) {
crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(existing));
}
type = crm_element_value(update_msg, F_CIB_OBJTYPE);
attach_cib_generation(update_msg, "cib_generation", the_cib);
if (existing != NULL) {
add_message_xml(update_msg, F_CIB_EXISTING, existing);
}
if (update != NULL) {
add_message_xml(update_msg, F_CIB_UPDATE, update);
}
g_hash_table_foreach_remove(client_list, cib_notify_client, update_msg);
if (update == NULL) {
crm_trace("Performing operation %s (on section=%s)", op, type);
} else {
crm_trace("Performing %s on <%s%s%s>", op, type, id ? " id=" : "", id ? id : "");
}
free_xml(update_msg);
}
void
cib_post_notify(int options, const char *op, xmlNode * update,
int result, xmlNode * new_obj)
{
gboolean needed = FALSE;
g_hash_table_foreach(client_list, need_post_notify, &needed);
if (needed == FALSE) {
return;
}
do_cib_notify(options, op, update, result, new_obj, T_CIB_UPDATE_CONFIRM);
}
void
cib_diff_notify(int options, const char *client, const char *call_id, const char *op,
xmlNode * update, int result, xmlNode * diff)
{
int add_updates = 0;
int add_epoch = 0;
int add_admin_epoch = 0;
int del_updates = 0;
int del_epoch = 0;
int del_admin_epoch = 0;
int log_level = LOG_DEBUG_2;
if (diff == NULL) {
return;
}
if (result != pcmk_ok) {
log_level = LOG_WARNING;
}
cib_diff_version_details(diff, &add_admin_epoch, &add_epoch, &add_updates,
&del_admin_epoch, &del_epoch, &del_updates);
if (add_updates != del_updates) {
do_crm_log(log_level,
"Update (client: %s%s%s): %d.%d.%d -> %d.%d.%d (%s)",
client, call_id ? ", call:" : "", call_id ? call_id : "",
del_admin_epoch, del_epoch, del_updates,
add_admin_epoch, add_epoch, add_updates, pcmk_strerror(result));
} else if (diff != NULL) {
do_crm_log(log_level,
"Local-only Change (client:%s%s%s): %d.%d.%d (%s)",
client, call_id ? ", call: " : "", call_id ? call_id : "",
add_admin_epoch, add_epoch, add_updates, pcmk_strerror(result));
}
do_cib_notify(options, op, update, result, diff, T_CIB_DIFF_NOTIFY);
}
void
do_cib_notify(int options, const char *op, xmlNode * update,
int result, xmlNode * result_data, const char *msg_type)
{
xmlNode *update_msg = NULL;
const char *id = NULL;
update_msg = create_xml_node(NULL, "notify");
if (result_data != NULL) {
id = crm_element_value(result_data, XML_ATTR_ID);
}
crm_xml_add(update_msg, F_TYPE, T_CIB_NOTIFY);
crm_xml_add(update_msg, F_SUBTYPE, msg_type);
crm_xml_add(update_msg, F_CIB_OPERATION, op);
crm_xml_add_int(update_msg, F_CIB_RC, result);
if (id != NULL) {
crm_xml_add(update_msg, F_CIB_OBJID, id);
}
if (update != NULL) {
crm_trace("Setting type to update->name: %s", crm_element_name(update));
crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(update));
} else if (result_data != NULL) {
crm_trace("Setting type to new_obj->name: %s", crm_element_name(result_data));
crm_xml_add(update_msg, F_CIB_OBJTYPE, crm_element_name(result_data));
} else {
crm_trace("Not Setting type");
}
attach_cib_generation(update_msg, "cib_generation", the_cib);
if (update != NULL) {
add_message_xml(update_msg, F_CIB_UPDATE, update);
}
if (result_data != NULL) {
add_message_xml(update_msg, F_CIB_UPDATE_RESULT, result_data);
}
crm_trace("Notifying clients");
g_hash_table_foreach_remove(client_list, cib_notify_client, update_msg);
free_xml(update_msg);
crm_trace("Notify complete");
}
void
attach_cib_generation(xmlNode * msg, const char *field, xmlNode * a_cib)
{
xmlNode *generation = create_xml_node(NULL, XML_CIB_TAG_GENERATION_TUPPLE);
if (a_cib != NULL) {
copy_in_properties(generation, a_cib);
}
add_message_xml(msg, field, generation);
free_xml(generation);
}
void
cib_replace_notify(const char *origin, xmlNode * update, int result, xmlNode * diff)
{
xmlNode *replace_msg = NULL;
int add_updates = 0;
int add_epoch = 0;
int add_admin_epoch = 0;
int del_updates = 0;
int del_epoch = 0;
int del_admin_epoch = 0;
if (diff == NULL) {
return;
}
cib_diff_version_details(diff, &add_admin_epoch, &add_epoch, &add_updates,
&del_admin_epoch, &del_epoch, &del_updates);
if(del_updates < 0) {
crm_log_xml_debug(diff, "Bad replace diff");
}
if (add_updates != del_updates) {
crm_info("Replaced: %d.%d.%d -> %d.%d.%d from %s",
del_admin_epoch, del_epoch, del_updates,
add_admin_epoch, add_epoch, add_updates, crm_str(origin));
} else if (diff != NULL) {
crm_info("Local-only Replace: %d.%d.%d from %s",
add_admin_epoch, add_epoch, add_updates, crm_str(origin));
}
replace_msg = create_xml_node(NULL, "notify-replace");
crm_xml_add(replace_msg, F_TYPE, T_CIB_NOTIFY);
crm_xml_add(replace_msg, F_SUBTYPE, T_CIB_REPLACE_NOTIFY);
crm_xml_add(replace_msg, F_CIB_OPERATION, CIB_OP_REPLACE);
crm_xml_add_int(replace_msg, F_CIB_RC, result);
attach_cib_generation(replace_msg, "cib-replace-generation", update);
crm_log_xml_trace(replace_msg, "CIB Replaced");
g_hash_table_foreach_remove(client_list, cib_notify_client, replace_msg);
free_xml(replace_msg);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5561_2 |
crossvul-cpp_data_bad_3453_0 | #include <config.h>
#include "ftpd.h"
#include "dynamic.h"
#include "ftpwho-update.h"
#include "globals.h"
#include "messages.h"
#ifdef WITH_DIRALIASES
# include "diraliases.h"
#endif
#ifdef WITH_TLS
# include "tls.h"
#endif
#ifdef WITH_DMALLOC
# include <dmalloc.h>
#endif
static void antiidle(void)
{
if (noopidle == (time_t) -1) {
noopidle = time(NULL);
} else {
if ((time(NULL) - noopidle) > (time_t) idletime_noop) {
die(421, LOG_INFO, MSG_TIMEOUT_NOOP, (unsigned long) idletime_noop);
}
}
}
/*
* Introduce a random delay, to keep attackers from guessing existing user
* names by measuring response delay. This is especially relevant when LDAP
* is used.
* No need to call usleep2() because we are root at this point.
*/
static void randomdelay(void)
{
usleep(rand() % 15000UL); /* dummy... no need for arc4 */
}
/*
* Simple but fast command-line reader. We break the FTP protocol here,
* because we deny access to files with strange characters in their name.
* Now, I seriously doubt that clients should be allowed to upload files
* with carriage returns, bells, cursor moves and other fancy stuff in the
* names. It can indirectly lead to security flaws with scripts, it's
* annoying for the sysadmin, it can be a client error, it can bring unexpected
* results on some filesystems, etc. So control chars are replaced by "_".
* Better be safe than 100% RFC crap compliant but unsafe. If you really want
* RFC compliance, define RFC_CONFORMANT_PARSER. But I will hate you.
*
* RFC_CONFORMANT_LINES is another thing that clients should implement
* properly (and it's trivial to do): lines must end with \r\n.
* Guess what? Some broken clients just send \n ... Grrrrrrrrrrrr !!!!!!!!!!!!!!!
*
* -Frank.
*/
int sfgets(void)
{
struct pollfd pfd;
int pollret;
ssize_t readnb;
signed char seen_r = 0;
static size_t scanned;
static size_t readnbd;
if (scanned > (size_t) 0U) { /* support pipelining */
readnbd -= scanned;
memmove(cmd, cmd + scanned, readnbd); /* safe */
scanned = (size_t) 0U;
}
pfd.fd = clientfd;
#ifdef __APPLE_CC__
pfd.events = POLLIN | POLLERR | POLLHUP;
#else
pfd.events = POLLIN | POLLPRI | POLLERR | POLLHUP;
#endif
while (scanned < cmdsize) {
if (scanned >= readnbd) { /* nothing left in the buffer */
pfd.revents = 0;
while ((pollret = poll(&pfd, 1U, idletime * 1000UL)) < 0 &&
errno == EINTR);
if (pollret == 0) {
return -1;
}
if (pollret <= 0 ||
(pfd.revents & (POLLERR | POLLHUP | POLLNVAL)) != 0) {
return -2;
}
if ((pfd.revents & (POLLIN | POLLPRI)) == 0) {
continue;
}
if (readnbd >= cmdsize) {
break;
}
#ifdef WITH_TLS
if (tls_cnx != NULL) {
while ((readnb = SSL_read
(tls_cnx, cmd + readnbd, cmdsize - readnbd))
< (ssize_t) 0 && errno == EINTR);
} else
#endif
{
while ((readnb = read(clientfd, cmd + readnbd,
cmdsize - readnbd)) < (ssize_t) 0 &&
errno == EINTR);
}
if (readnb <= (ssize_t) 0) {
return -2;
}
readnbd += readnb;
if (readnbd > cmdsize) {
return -2;
}
}
#ifdef RFC_CONFORMANT_LINES
if (seen_r != 0) {
#endif
if (cmd[scanned] == '\n') {
#ifndef RFC_CONFORMANT_LINES
if (seen_r != 0) {
#endif
cmd[scanned - 1U] = 0;
#ifndef RFC_CONFORMANT_LINES
} else {
cmd[scanned] = 0;
}
#endif
if (++scanned >= readnbd) { /* non-pipelined command */
scanned = readnbd = (size_t) 0U;
}
return 0;
}
seen_r = 0;
#ifdef RFC_CONFORMANT_LINES
}
#endif
if (ISCTRLCODE(cmd[scanned])) {
if (cmd[scanned] == '\r') {
seen_r = 1;
}
#ifdef RFC_CONFORMANT_PARSER /* disabled by default, intentionally */
else if (cmd[scanned] == 0) {
cmd[scanned] = '\n';
}
#else
/* replace control chars with _ */
cmd[scanned] = '_';
#endif
}
scanned++;
}
die(421, LOG_WARNING, MSG_LINE_TOO_LONG); /* don't remove this */
return 0; /* to please GCC */
}
/* Replace extra spaces before and after a string with '_' */
#ifdef MINIMAL
# define revealextraspc(X) (X)
#else
static char *revealextraspc(char * const s_)
{
unsigned char *s = (unsigned char *) s_;
unsigned char *sn;
if (s == NULL) {
return s_;
}
simplify(s_);
while (*s != 0U && isspace(*s)) {
*s++ = '_';
}
if (*s == 0U) {
return s_;
}
sn = s;
do {
sn++;
} while (*sn != 0U);
do {
sn--;
if (!isspace(*sn)) {
break;
}
*sn = '_';
} while (sn != s);
return s_;
}
#endif
#ifdef WITH_RFC2640
char *charset_client2fs(const char * const string)
{
char *output = NULL, *output_;
size_t inlen, outlen, outlen_;
inlen = strlen(string);
outlen_ = outlen = inlen * (size_t) 4U + (size_t) 1U;
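/* outlen <= inlen can only happen if inlen * 4 overflowed size_t, so this
* check doubles as an integer-overflow guard on the allocation size. */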
if (outlen <= inlen ||
(output_ = output = calloc(outlen, (size_t) 1U)) == NULL) {
die_mem();
}
if (utf8 > 0 && strcasecmp(charset_fs, "utf-8") != 0) {
if (iconv(iconv_fd_utf82fs, (char **) &string,
&inlen, &output_, &outlen_) == (size_t) -1) {
strncpy(output, string, outlen);
}
} else if (utf8 <= 0 && strcasecmp(charset_fs, charset_client) != 0) {
if (iconv(iconv_fd_client2fs, (char **) &string,
&inlen, &output_, &outlen_) == (size_t) -1) {
strncpy(output, string, outlen);
}
} else {
strncpy(output, string, outlen);
}
output[outlen - 1] = 0;
return output;
}
#endif
void parser(void)
{
char *arg;
#ifndef MINIMAL
char *sitearg;
#endif
#ifdef WITH_RFC2640
char *narg = NULL;
#endif
size_t n;
#ifdef IMPLICIT_TLS
(void) tls_init_new_session();
data_protection_level = CPL_PRIVATE;
#endif
for (;;) {
xferfd = -1;
if (state_needs_update != 0) {
state_needs_update = 0;
setprocessname("pure-ftpd (IDLE)");
#ifdef FTPWHO
if (shm_data_cur != NULL) {
ftpwho_lock();
shm_data_cur->state = FTPWHO_STATE_IDLE;
*shm_data_cur->filename = 0;
ftpwho_unlock();
}
#endif
}
doreply();
alarm(idletime * 2);
switch (sfgets()) {
case -1:
#ifdef BORING_MODE
die(421, LOG_INFO, MSG_TIMEOUT);
#else
die(421, LOG_INFO, MSG_TIMEOUT_PARSER);
#endif
case -2:
return;
}
#ifdef DEBUG
if (debug != 0) {
addreply(0, "%s", cmd);
}
#endif
n = (size_t) 0U;
while ((isalpha((unsigned char) cmd[n]) || cmd[n] == '@') &&
n < cmdsize) {
cmd[n] = (char) tolower((unsigned char) cmd[n]);
n++;
}
if (n >= cmdsize) { /* overparanoid, it should never happen */
die(421, LOG_WARNING, MSG_LINE_TOO_LONG);
}
if (n == (size_t) 0U) {
nop:
addreply_noformat(500, "?");
continue;
}
#ifdef SKIP_COMMAND_TRAILING_SPACES
while (isspace((unsigned char) cmd[n]) && n < cmdsize) {
cmd[n++] = 0;
}
arg = cmd + n;
while (cmd[n] != 0 && n < cmdsize) {
n++;
}
n--;
while (isspace((unsigned char) cmd[n])) {
cmd[n--] = 0;
}
#else
if (cmd[n] == 0) {
arg = cmd + n;
} else if (isspace((unsigned char) cmd[n])) {
cmd[n] = 0;
arg = cmd + n + 1;
} else {
goto nop;
}
#endif
if (logging != 0) {
#ifdef DEBUG
logfile(LOG_DEBUG, MSG_DEBUG_COMMAND " [%s] [%s]",
cmd, arg);
#else
logfile(LOG_DEBUG, MSG_DEBUG_COMMAND " [%s] [%s]",
cmd, strcmp(cmd, "pass") ? arg : "<*>");
#endif
}
#ifdef WITH_RFC2640
narg = charset_client2fs(arg);
arg = narg;
#endif
/*
* antiidle() is called with dummy commands, usually used by clients
* who are wanting extra idle time. We give them some, but not too much.
* When we jump to wayout, the idle timer is not zeroed. It means that
* we didn't issue an 'active' command like RETR.
*/
#ifndef MINIMAL
if (!strcmp(cmd, "noop")) {
antiidle();
donoop();
goto wayout;
}
#endif
if (!strcmp(cmd, "user")) {
#ifdef WITH_TLS
if (enforce_tls_auth > 1 && tls_cnx == NULL) {
die(421, LOG_WARNING, MSG_TLS_NEEDED);
}
#endif
douser(arg);
} else if (!strcmp(cmd, "acct")) {
addreply(202, MSG_WHOAREYOU);
} else if (!strcmp(cmd, "pass")) {
if (guest == 0) {
randomdelay();
}
dopass(arg);
} else if (!strcmp(cmd, "quit")) {
addreply(221, MSG_GOODBYE,
(unsigned long long) ((uploaded + 1023ULL) / 1024ULL),
(unsigned long long) ((downloaded + 1023ULL) / 1024ULL));
return;
} else if (!strcmp(cmd, "syst")) {
antiidle();
addreply_noformat(215, "UNIX Type: L8");
goto wayout;
#ifdef WITH_TLS
} else if (enforce_tls_auth > 0 &&
!strcmp(cmd, "auth") && !strcasecmp(arg, "tls")) {
addreply_noformat(234, "AUTH TLS OK.");
doreply();
if (tls_cnx == NULL) {
(void) tls_init_new_session();
}
goto wayout;
} else if (!strcmp(cmd, "pbsz")) {
addreply_noformat(tls_cnx == NULL ? 503 : 200, "PBSZ=0");
} else if (!strcmp(cmd, "prot")) {
if (tls_cnx == NULL) {
addreply_noformat(503, MSG_PROT_BEFORE_PBSZ);
goto wayout;
}
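/* The missing breaks below are deliberate: a level with trailing garbage
* falls through the next cases and lands in the 534 fallback. */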
switch (*arg) {
case 0:
addreply_noformat(503, MSG_MISSING_ARG);
data_protection_level = CPL_NONE;
break;
case 'C':
if (arg[1] == 0) {
addreply(200, MSG_PROT_OK, "clear");
data_protection_level = CPL_CLEAR;
break;
}
case 'S':
case 'E':
if (arg[1] == 0) {
addreply(200, MSG_PROT_UNKNOWN_LEVEL, arg, "private");
data_protection_level = CPL_PRIVATE;
break;
}
case 'P':
if (arg[1] == 0) {
addreply(200, MSG_PROT_OK, "private");
data_protection_level = CPL_PRIVATE;
break;
}
default:
addreply_noformat(534, "Fallback to [C]");
data_protection_level = CPL_CLEAR;
break;
}
#endif
} else if (!strcmp(cmd, "auth") || !strcmp(cmd, "adat")) {
addreply_noformat(500, MSG_AUTH_UNIMPLEMENTED);
} else if (!strcmp(cmd, "type")) {
antiidle();
dotype(arg);
goto wayout;
} else if (!strcmp(cmd, "mode")) {
antiidle();
domode(arg);
goto wayout;
#ifndef MINIMAL
} else if (!strcmp(cmd, "feat")) {
dofeat();
goto wayout;
} else if (!strcmp(cmd, "opts")) {
doopts(arg);
goto wayout;
#endif
} else if (!strcmp(cmd, "stru")) {
dostru(arg);
goto wayout;
#ifndef MINIMAL
} else if (!strcmp(cmd, "help")) {
goto help_site;
#endif
#ifdef DEBUG
} else if (!strcmp(cmd, "xdbg")) {
debug++;
addreply(200, MSG_XDBG_OK, debug);
goto wayout;
#endif
} else if (loggedin == 0) {
/* from this point, all commands need authentication */
addreply_noformat(530, MSG_NOT_LOGGED_IN);
goto wayout;
} else {
if (!strcmp(cmd, "cwd") || !strcmp(cmd, "xcwd")) {
antiidle();
docwd(arg);
goto wayout;
} else if (!strcmp(cmd, "port")) {
doport(arg);
#ifndef MINIMAL
} else if (!strcmp(cmd, "eprt")) {
doeprt(arg);
} else if (!strcmp(cmd, "esta") &&
disallow_passive == 0 &&
STORAGE_FAMILY(force_passive_ip) == 0) {
doesta();
} else if (!strcmp(cmd, "estp")) {
doestp();
#endif
} else if (disallow_passive == 0 &&
(!strcmp(cmd, "pasv") || !strcmp(cmd, "p@sw"))) {
dopasv(0);
} else if (disallow_passive == 0 &&
(!strcmp(cmd, "epsv") &&
(broken_client_compat == 0 ||
STORAGE_FAMILY(ctrlconn) == AF_INET6))) {
if (!strcasecmp(arg, "all")) {
epsv_all = 1;
addreply_noformat(220, MSG_ACTIVE_DISABLED);
} else if (!strcmp(arg, "2") && !v6ready) {
addreply_noformat(522, MSG_ONLY_IPV4);
} else {
dopasv(1);
}
#ifndef MINIMAL
} else if (disallow_passive == 0 && !strcmp(cmd, "spsv")) {
dopasv(2);
} else if (!strcmp(cmd, "allo")) {
if (*arg == 0) {
addreply_noformat(501, MSG_STAT_FAILURE);
} else {
const off_t size = (off_t) strtoull(arg, NULL, 10);
if (size < (off_t) 0) {
addreply_noformat(501, MSG_STAT_FAILURE);
} else {
doallo(size);
}
}
#endif
} else if (!strcmp(cmd, "pwd") || !strcmp(cmd, "xpwd")) {
#ifdef WITH_RFC2640
char *nwd;
#endif
antiidle();
#ifdef WITH_RFC2640
nwd = charset_fs2client(wd);
addreply(257, "\"%s\" " MSG_IS_YOUR_CURRENT_LOCATION, nwd);
free(nwd);
#else
addreply(257, "\"%s\" " MSG_IS_YOUR_CURRENT_LOCATION, wd);
#endif
goto wayout;
} else if (!strcmp(cmd, "cdup") || !strcmp(cmd, "xcup")) {
docwd("..");
} else if (!strcmp(cmd, "retr")) {
if (*arg != 0) {
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
}
else
#endif
{
doretr(arg);
}
} else {
addreply_noformat(501, MSG_NO_FILE_NAME);
}
} else if (!strcmp(cmd, "rest")) {
antiidle();
if (*arg != 0) {
dorest(arg);
} else {
addreply_noformat(501, MSG_NO_RESTART_POINT);
restartat = (off_t) 0;
}
goto wayout;
} else if (!strcmp(cmd, "dele")) {
if (*arg != 0) {
dodele(arg);
} else {
addreply_noformat(501, MSG_NO_FILE_NAME);
}
} else if (!strcmp(cmd, "stor")) {
arg = revealextraspc(arg);
if (*arg != 0) {
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
dostor(arg, 0, autorename);
}
} else {
addreply_noformat(501, MSG_NO_FILE_NAME);
}
} else if (!strcmp(cmd, "appe")) {
arg = revealextraspc(arg);
if (*arg != 0) {
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
dostor(arg, 1, 0);
}
} else {
addreply_noformat(501, MSG_NO_FILE_NAME);
}
#ifndef MINIMAL
} else if (!strcmp(cmd, "stou")) {
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
dostou();
}
#endif
#ifndef DISABLE_MKD_RMD
} else if (!strcmp(cmd, "mkd") || !strcmp(cmd, "xmkd")) {
arg = revealextraspc(arg);
if (*arg != 0) {
domkd(arg);
} else {
addreply_noformat(501, MSG_NO_DIRECTORY_NAME);
}
} else if (!strcmp(cmd, "rmd") || !strcmp(cmd, "xrmd")) {
if (*arg != 0) {
dormd(arg);
} else {
addreply_noformat(550, MSG_NO_DIRECTORY_NAME);
}
#endif
#ifndef MINIMAL
} else if (!strcmp(cmd, "stat")) {
if (*arg != 0) {
modern_listings = 0;
donlist(arg, 1, 1, 1, 1);
} else {
addreply_noformat(211, "http://www.pureftpd.org/");
}
#endif
} else if (!strcmp(cmd, "list")) {
#ifndef MINIMAL
modern_listings = 0;
#endif
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
donlist(arg, 0, 1, 0, 1);
}
} else if (!strcmp(cmd, "nlst")) {
#ifndef MINIMAL
modern_listings = 0;
#endif
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
donlist(arg, 0, 0, 0, broken_client_compat);
}
#ifndef MINIMAL
} else if (!strcmp(cmd, "mlst")) {
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
domlst(*arg != 0 ? arg : ".");
}
} else if (!strcmp(cmd, "mlsd")) {
modern_listings = 1;
#ifdef WITH_TLS
if (enforce_tls_auth == 3 &&
data_protection_level != CPL_PRIVATE) {
addreply_noformat(521, MSG_PROT_PRIVATE_NEEDED);
} else
#endif
{
donlist(arg, 0, 1, 1, 0);
}
#endif
} else if (!strcmp(cmd, "abor")) {
addreply_noformat(226, MSG_ABOR_SUCCESS);
#ifndef MINIMAL
} else if (!strcmp(cmd, "site")) {
if ((sitearg = arg) != NULL) {
while (*sitearg != 0 && !isspace((unsigned char) *sitearg)) {
sitearg++;
}
if (*sitearg != 0) {
*sitearg++ = 0;
}
}
if (!strcasecmp(arg, "idle")) {
if (sitearg == NULL || *sitearg == 0) {
addreply_noformat(501, "SITE IDLE: " MSG_MISSING_ARG);
} else {
unsigned long int i = 0;
i = strtoul(sitearg, &sitearg, 10);
if (sitearg && *sitearg)
addreply(501, MSG_GARBAGE_FOUND " : %s", sitearg);
else if (i > MAX_SITE_IDLE)
addreply_noformat(501, MSG_VALUE_TOO_LARGE);
else {
idletime = i;
addreply(200, MSG_IDLE_TIME, idletime);
idletime_noop = (double) idletime * 2.0;
}
}
} else if (!strcasecmp(arg, "time")) {
dositetime();
} else if (!strcasecmp(arg, "help")) {
help_site:
addreply_noformat(214, MSG_SITE_HELP CRLF
# ifdef WITH_DIRALIASES
" ALIAS" CRLF
# endif
" CHMOD" CRLF " IDLE" CRLF " UTIME");
addreply_noformat(214, "Pure-FTPd - http://pureftpd.org/");
} else if (!strcasecmp(arg, "chmod")) {
char *sitearg2;
mode_t mode;
parsechmod:
if (sitearg == NULL || *sitearg == 0) {
addreply_noformat(501, MSG_MISSING_ARG);
goto chmod_wayout;
}
sitearg2 = sitearg;
while (*sitearg2 != 0 && !isspace((unsigned char) *sitearg2)) {
sitearg2++;
}
while (*sitearg2 != 0 && isspace((unsigned char) *sitearg2)) {
sitearg2++;
}
if (*sitearg2 == 0) {
addreply_noformat(550, MSG_NO_FILE_NAME);
goto chmod_wayout;
}
mode = (mode_t) strtoul(sitearg, NULL, 8);
if (mode > (mode_t) 07777) {
addreply_noformat(501, MSG_BAD_CHMOD);
goto chmod_wayout;
}
dochmod(sitearg2, mode);
chmod_wayout:
(void) 0;
} else if (!strcasecmp(arg, "utime")) {
char *sitearg2;
if (sitearg == NULL || *sitearg == 0) {
addreply_noformat(501, MSG_NO_FILE_NAME);
goto utime_wayout;
}
if ((sitearg2 = strrchr(sitearg, ' ')) == NULL ||
sitearg2 == sitearg) {
addreply_noformat(501, MSG_MISSING_ARG);
goto utime_wayout;
}
if (strcasecmp(sitearg2, " UTC") != 0) {
addreply_noformat(500, "UTC Only");
goto utime_wayout;
}
*sitearg2-- = 0;
if ((sitearg2 = strrchr(sitearg, ' ')) == NULL ||
sitearg2 == sitearg) {
utime_no_arg:
addreply_noformat(501, MSG_MISSING_ARG);
goto utime_wayout;
}
*sitearg2-- = 0;
if ((sitearg2 = strrchr(sitearg, ' ')) == NULL ||
sitearg2 == sitearg) {
goto utime_no_arg;
}
*sitearg2-- = 0;
if ((sitearg2 = strrchr(sitearg, ' ')) == NULL ||
sitearg2 == sitearg) {
goto utime_no_arg;
}
*sitearg2++ = 0;
if (*sitearg2 == 0) {
goto utime_no_arg;
}
doutime(sitearg, sitearg2);
utime_wayout:
(void) 0;
# ifdef WITH_DIRALIASES
} else if (!strcasecmp(arg, "alias")) {
if (sitearg == NULL || *sitearg == 0) {
print_aliases();
} else {
const char *alias;
if ((alias = lookup_alias(sitearg)) != NULL) {
addreply(214, MSG_ALIASES_ALIAS, sitearg, alias);
} else {
addreply(502, MSG_ALIASES_UNKNOWN, sitearg);
}
}
# endif
} else if (*arg != 0) {
addreply(500, "SITE %s " MSG_UNKNOWN_EXTENSION, arg);
} else {
addreply_noformat(500, "SITE: " MSG_MISSING_ARG);
}
#endif
} else if (!strcmp(cmd, "mdtm")) {
domdtm(arg);
} else if (!strcmp(cmd, "size")) {
dosize(arg);
#ifndef MINIMAL
} else if (!strcmp(cmd, "chmod")) {
sitearg = arg;
goto parsechmod;
#endif
} else if (!strcmp(cmd, "rnfr")) {
if (*arg != 0) {
dornfr(arg);
} else {
addreply_noformat(550, MSG_NO_FILE_NAME);
}
} else if (!strcmp(cmd, "rnto")) {
arg = revealextraspc(arg);
if (*arg != 0) {
dornto(arg);
} else {
addreply_noformat(550, MSG_NO_FILE_NAME);
}
} else {
addreply_noformat(500, MSG_UNKNOWN_COMMAND);
}
}
noopidle = (time_t) -1;
wayout:
#ifdef WITH_RFC2640
free(narg);
narg = NULL;
#endif
#ifdef THROTTLING
if (throttling_delay != 0UL) {
usleep2(throttling_delay);
}
#else
(void) 0;
#endif
}
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3453_0 |
crossvul-cpp_data_good_5006_1 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 Forwarding Information Base: FIB frontend.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/capability.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_addr.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/ip_fib.h>
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <net/l3mdev.h>
#include <trace/events/fib.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES
static int __net_init fib4_rules_init(struct net *net)
{
struct fib_table *local_table, *main_table;
main_table = fib_trie_table(RT_TABLE_MAIN, NULL);
if (!main_table)
return -ENOMEM;
local_table = fib_trie_table(RT_TABLE_LOCAL, main_table);
if (!local_table)
goto fail;
hlist_add_head_rcu(&local_table->tb_hlist,
&net->ipv4.fib_table_hash[TABLE_LOCAL_INDEX]);
hlist_add_head_rcu(&main_table->tb_hlist,
&net->ipv4.fib_table_hash[TABLE_MAIN_INDEX]);
return 0;
fail:
fib_free_table(main_table);
return -ENOMEM;
}
#else
struct fib_table *fib_new_table(struct net *net, u32 id)
{
struct fib_table *tb, *alias = NULL;
unsigned int h;
if (id == 0)
id = RT_TABLE_MAIN;
tb = fib_get_table(net, id);
if (tb)
return tb;
if (id == RT_TABLE_LOCAL)
alias = fib_new_table(net, RT_TABLE_MAIN);
tb = fib_trie_table(id, alias);
if (!tb)
return NULL;
switch (id) {
case RT_TABLE_LOCAL:
rcu_assign_pointer(net->ipv4.fib_local, tb);
break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, tb);
break;
case RT_TABLE_DEFAULT:
rcu_assign_pointer(net->ipv4.fib_default, tb);
break;
default:
break;
}
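/* FIB_TABLE_HASHSZ is a power of two, so masking the id is a cheap modulo
* when picking the hash chain. */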
h = id & (FIB_TABLE_HASHSZ - 1);
hlist_add_head_rcu(&tb->tb_hlist, &net->ipv4.fib_table_hash[h]);
return tb;
}
/* caller must hold either rtnl or rcu read lock */
struct fib_table *fib_get_table(struct net *net, u32 id)
{
struct fib_table *tb;
struct hlist_head *head;
unsigned int h;
if (id == 0)
id = RT_TABLE_MAIN;
h = id & (FIB_TABLE_HASHSZ - 1);
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (tb->tb_id == id)
return tb;
}
return NULL;
}
#endif /* CONFIG_IP_MULTIPLE_TABLES */
static void fib_replace_table(struct net *net, struct fib_table *old,
struct fib_table *new)
{
#ifdef CONFIG_IP_MULTIPLE_TABLES
switch (new->tb_id) {
case RT_TABLE_LOCAL:
rcu_assign_pointer(net->ipv4.fib_local, new);
break;
case RT_TABLE_MAIN:
rcu_assign_pointer(net->ipv4.fib_main, new);
break;
case RT_TABLE_DEFAULT:
rcu_assign_pointer(net->ipv4.fib_default, new);
break;
default:
break;
}
#endif
/* replace the old table in the hlist */
hlist_replace_rcu(&old->tb_hlist, &new->tb_hlist);
}
int fib_unmerge(struct net *net)
{
struct fib_table *old, *new;
/* attempt to fetch local table if it has been allocated */
old = fib_get_table(net, RT_TABLE_LOCAL);
if (!old)
return 0;
new = fib_trie_unmerge(old);
if (!new)
return -ENOMEM;
/* replace merged table with clean table */
if (new != old) {
fib_replace_table(net, old, new);
fib_free_table(old);
}
return 0;
}
static void fib_flush(struct net *net)
{
int flushed = 0;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[h];
struct hlist_node *tmp;
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
flushed += fib_table_flush(tb);
}
if (flushed)
rt_cache_flush(net);
}
void fib_flush_external(struct net *net)
{
struct fib_table *tb;
struct hlist_head *head;
unsigned int h;
for (h = 0; h < FIB_TABLE_HASHSZ; h++) {
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry(tb, head, tb_hlist)
fib_table_flush_external(tb);
}
}
/*
* Find address type as if only "dev" was present in the system. If
* on_dev is NULL then all interfaces are taken into consideration.
*/
static inline unsigned int __inet_dev_addr_type(struct net *net,
const struct net_device *dev,
__be32 addr, u32 tb_id)
{
struct flowi4 fl4 = { .daddr = addr };
struct fib_result res;
unsigned int ret = RTN_BROADCAST;
struct fib_table *table;
if (ipv4_is_zeronet(addr) || ipv4_is_lbcast(addr))
return RTN_BROADCAST;
if (ipv4_is_multicast(addr))
return RTN_MULTICAST;
rcu_read_lock();
table = fib_get_table(net, tb_id);
if (table) {
ret = RTN_UNICAST;
if (!fib_table_lookup(table, &fl4, &res, FIB_LOOKUP_NOREF)) {
if (!dev || dev == res.fi->fib_dev)
ret = res.type;
}
}
rcu_read_unlock();
return ret;
}
unsigned int inet_addr_type_table(struct net *net, __be32 addr, u32 tb_id)
{
return __inet_dev_addr_type(net, NULL, addr, tb_id);
}
EXPORT_SYMBOL(inet_addr_type_table);
unsigned int inet_addr_type(struct net *net, __be32 addr)
{
return __inet_dev_addr_type(net, NULL, addr, RT_TABLE_LOCAL);
}
EXPORT_SYMBOL(inet_addr_type);
unsigned int inet_dev_addr_type(struct net *net, const struct net_device *dev,
__be32 addr)
{
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
return __inet_dev_addr_type(net, dev, addr, rt_table);
}
EXPORT_SYMBOL(inet_dev_addr_type);
/* inet_addr_type with dev == NULL but using the table from a dev
* if one is associated
*/
unsigned int inet_addr_type_dev_table(struct net *net,
const struct net_device *dev,
__be32 addr)
{
u32 rt_table = l3mdev_fib_table(dev) ? : RT_TABLE_LOCAL;
return __inet_dev_addr_type(net, NULL, addr, rt_table);
}
EXPORT_SYMBOL(inet_addr_type_dev_table);
__be32 fib_compute_spec_dst(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
struct in_device *in_dev;
struct fib_result res;
struct rtable *rt;
struct flowi4 fl4;
struct net *net;
int scope;
rt = skb_rtable(skb);
if ((rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST | RTCF_LOCAL)) ==
RTCF_LOCAL)
return ip_hdr(skb)->daddr;
in_dev = __in_dev_get_rcu(dev);
BUG_ON(!in_dev);
net = dev_net(dev);
scope = RT_SCOPE_UNIVERSE;
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
fl4.flowi4_oif = 0;
fl4.flowi4_iif = LOOPBACK_IFINDEX;
fl4.daddr = ip_hdr(skb)->saddr;
fl4.saddr = 0;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_scope = scope;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0;
fl4.flowi4_tun_key.tun_id = 0;
if (!fib_lookup(net, &fl4, &res, 0))
return FIB_RES_PREFSRC(net, res);
} else {
scope = RT_SCOPE_LINK;
}
return inet_select_addr(dev, ip_hdr(skb)->saddr, scope);
}
/* Given (packet source, input interface) and optional (dst, oif, tos):
* - (main) check, that source is valid i.e. not broadcast or our local
* address.
* - figure out what "logical" interface this packet arrived
* and calculate "specific destination" address.
* - check, that packet arrived from expected physical interface.
* called with rcu_read_lock()
*/
static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
int rpf, struct in_device *idev, u32 *itag)
{
int ret, no_addr;
struct fib_result res;
struct flowi4 fl4;
struct net *net;
bool dev_match;
fl4.flowi4_oif = 0;
fl4.flowi4_iif = l3mdev_master_ifindex_rcu(dev);
if (!fl4.flowi4_iif)
fl4.flowi4_iif = oif ? : LOOPBACK_IFINDEX;
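/* Deliberately swapped: validating the source means looking up a route
* back to it, so src becomes the destination of this reverse lookup. */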
fl4.daddr = src;
fl4.saddr = dst;
fl4.flowi4_tos = tos;
fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
fl4.flowi4_tun_key.tun_id = 0;
fl4.flowi4_flags = 0;
no_addr = idev->ifa_list == NULL;
fl4.flowi4_mark = IN_DEV_SRC_VMARK(idev) ? skb->mark : 0;
trace_fib_validate_source(dev, &fl4);
net = dev_net(dev);
if (fib_lookup(net, &fl4, &res, 0))
goto last_resort;
if (res.type != RTN_UNICAST &&
(res.type != RTN_LOCAL || !IN_DEV_ACCEPT_LOCAL(idev)))
goto e_inval;
if (!rpf && !fib_num_tclassid_users(dev_net(dev)) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev)))
goto last_resort;
fib_combine_itag(itag, &res);
dev_match = false;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
for (ret = 0; ret < res.fi->fib_nhs; ret++) {
struct fib_nh *nh = &res.fi->fib_nh[ret];
if (nh->nh_dev == dev) {
dev_match = true;
break;
} else if (l3mdev_master_ifindex_rcu(nh->nh_dev) == dev->ifindex) {
dev_match = true;
break;
}
}
#else
if (FIB_RES_DEV(res) == dev)
dev_match = true;
#endif
if (dev_match) {
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
return ret;
}
if (no_addr)
goto last_resort;
if (rpf == 1)
goto e_rpf;
fl4.flowi4_oif = dev->ifindex;
ret = 0;
if (fib_lookup(net, &fl4, &res, FIB_LOOKUP_IGNORE_LINKSTATE) == 0) {
if (res.type == RTN_UNICAST)
ret = FIB_RES_NH(res).nh_scope >= RT_SCOPE_HOST;
}
return ret;
last_resort:
if (rpf)
goto e_rpf;
*itag = 0;
return 0;
e_inval:
return -EINVAL;
e_rpf:
return -EXDEV;
}
/* Ignore rp_filter for packets protected by IPsec. */
int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
u8 tos, int oif, struct net_device *dev,
struct in_device *idev, u32 *itag)
{
int r = secpath_exists(skb) ? 0 : IN_DEV_RPFILTER(idev);
if (!r && !fib_num_tclassid_users(dev_net(dev)) &&
IN_DEV_ACCEPT_LOCAL(idev) &&
(dev->ifindex != oif || !IN_DEV_TX_REDIRECTS(idev))) {
*itag = 0;
return 0;
}
return __fib_validate_source(skb, src, dst, tos, oif, dev, r, idev, itag);
}
static inline __be32 sk_extract_addr(struct sockaddr *addr)
{
return ((struct sockaddr_in *) addr)->sin_addr.s_addr;
}
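/* Append one 4-byte metric attribute to the buffer at mx and return the
* new total length. Each entry consumes nla_total_size(4) bytes, which is
* why the caller below allocates 3 * nla_total_size(4) for the ADVMSS,
* WINDOW and RTT entries. */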
static int put_rtax(struct nlattr *mx, int len, int type, u32 value)
{
struct nlattr *nla;
nla = (struct nlattr *) ((char *) mx + len);
nla->nla_type = type;
nla->nla_len = nla_attr_size(4);
*(u32 *) nla_data(nla) = value;
return len + nla_total_size(4);
}
static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt,
struct fib_config *cfg)
{
__be32 addr;
int plen;
memset(cfg, 0, sizeof(*cfg));
cfg->fc_nlinfo.nl_net = net;
if (rt->rt_dst.sa_family != AF_INET)
return -EAFNOSUPPORT;
/*
* Check mask for validity:
* a) it must be contiguous.
* b) destination must have all host bits clear.
* c) if application forgot to set correct family (AF_INET),
* reject request unless it is absolutely clear i.e.
* both family and mask are zero.
*/
plen = 32;
addr = sk_extract_addr(&rt->rt_dst);
if (!(rt->rt_flags & RTF_HOST)) {
__be32 mask = sk_extract_addr(&rt->rt_genmask);
if (rt->rt_genmask.sa_family != AF_INET) {
if (mask || rt->rt_genmask.sa_family)
return -EAFNOSUPPORT;
}
if (bad_mask(mask, addr))
return -EINVAL;
plen = inet_mask_len(mask);
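/* e.g. a genmask of 255.255.255.0 yields plen == 24 */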
}
cfg->fc_dst_len = plen;
cfg->fc_dst = addr;
if (cmd != SIOCDELRT) {
cfg->fc_nlflags = NLM_F_CREATE;
cfg->fc_protocol = RTPROT_BOOT;
}
if (rt->rt_metric)
cfg->fc_priority = rt->rt_metric - 1;
if (rt->rt_flags & RTF_REJECT) {
cfg->fc_scope = RT_SCOPE_HOST;
cfg->fc_type = RTN_UNREACHABLE;
return 0;
}
cfg->fc_scope = RT_SCOPE_NOWHERE;
cfg->fc_type = RTN_UNICAST;
if (rt->rt_dev) {
char *colon;
struct net_device *dev;
char devname[IFNAMSIZ];
if (copy_from_user(devname, rt->rt_dev, IFNAMSIZ-1))
return -EFAULT;
devname[IFNAMSIZ-1] = 0;
colon = strchr(devname, ':');
if (colon)
*colon = 0;
dev = __dev_get_by_name(net, devname);
if (!dev)
return -ENODEV;
cfg->fc_oif = dev->ifindex;
if (colon) {
struct in_ifaddr *ifa;
struct in_device *in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
return -ENODEV;
*colon = ':';
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
if (strcmp(ifa->ifa_label, devname) == 0)
break;
if (!ifa)
return -ENODEV;
cfg->fc_prefsrc = ifa->ifa_local;
}
}
addr = sk_extract_addr(&rt->rt_gateway);
if (rt->rt_gateway.sa_family == AF_INET && addr) {
unsigned int addr_type;
cfg->fc_gw = addr;
addr_type = inet_addr_type_table(net, addr, cfg->fc_table);
if (rt->rt_flags & RTF_GATEWAY &&
addr_type == RTN_UNICAST)
cfg->fc_scope = RT_SCOPE_UNIVERSE;
}
if (cmd == SIOCDELRT)
return 0;
if (rt->rt_flags & RTF_GATEWAY && !cfg->fc_gw)
return -EINVAL;
if (cfg->fc_scope == RT_SCOPE_NOWHERE)
cfg->fc_scope = RT_SCOPE_LINK;
if (rt->rt_flags & (RTF_MTU | RTF_WINDOW | RTF_IRTT)) {
struct nlattr *mx;
int len = 0;
mx = kzalloc(3 * nla_total_size(4), GFP_KERNEL);
if (!mx)
return -ENOMEM;
if (rt->rt_flags & RTF_MTU)
len = put_rtax(mx, len, RTAX_ADVMSS, rt->rt_mtu - 40);
if (rt->rt_flags & RTF_WINDOW)
len = put_rtax(mx, len, RTAX_WINDOW, rt->rt_window);
if (rt->rt_flags & RTF_IRTT)
len = put_rtax(mx, len, RTAX_RTT, rt->rt_irtt << 3);
cfg->fc_mx = mx;
cfg->fc_mx_len = len;
}
return 0;
}
/*
* Handle IP routing ioctl calls.
* These are used to manipulate the routing tables
*/
int ip_rt_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
struct fib_config cfg;
struct rtentry rt;
int err;
switch (cmd) {
case SIOCADDRT: /* Add a route */
case SIOCDELRT: /* Delete a route */
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&rt, arg, sizeof(rt)))
return -EFAULT;
rtnl_lock();
err = rtentry_to_fib_config(net, cmd, &rt, &cfg);
if (err == 0) {
struct fib_table *tb;
if (cmd == SIOCDELRT) {
tb = fib_get_table(net, cfg.fc_table);
if (tb)
err = fib_table_delete(tb, &cfg);
else
err = -ESRCH;
} else {
tb = fib_new_table(net, cfg.fc_table);
if (tb)
err = fib_table_insert(tb, &cfg);
else
err = -ENOBUFS;
}
/* allocated by rtentry_to_fib_config() */
kfree(cfg.fc_mx);
}
rtnl_unlock();
return err;
}
return -EINVAL;
}
const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
[RTA_DST] = { .type = NLA_U32 },
[RTA_SRC] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_GATEWAY] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
[RTA_PREFSRC] = { .type = NLA_U32 },
[RTA_METRICS] = { .type = NLA_NESTED },
[RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
[RTA_FLOW] = { .type = NLA_U32 },
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
};
static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
struct nlmsghdr *nlh, struct fib_config *cfg)
{
struct nlattr *attr;
int err, remaining;
struct rtmsg *rtm;
err = nlmsg_validate(nlh, sizeof(*rtm), RTA_MAX, rtm_ipv4_policy);
if (err < 0)
goto errout;
memset(cfg, 0, sizeof(*cfg));
rtm = nlmsg_data(nlh);
cfg->fc_dst_len = rtm->rtm_dst_len;
cfg->fc_tos = rtm->rtm_tos;
cfg->fc_table = rtm->rtm_table;
cfg->fc_protocol = rtm->rtm_protocol;
cfg->fc_scope = rtm->rtm_scope;
cfg->fc_type = rtm->rtm_type;
cfg->fc_flags = rtm->rtm_flags;
cfg->fc_nlflags = nlh->nlmsg_flags;
cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
cfg->fc_nlinfo.nlh = nlh;
cfg->fc_nlinfo.nl_net = net;
if (cfg->fc_type > RTN_MAX) {
err = -EINVAL;
goto errout;
}
nlmsg_for_each_attr(attr, nlh, sizeof(struct rtmsg), remaining) {
switch (nla_type(attr)) {
case RTA_DST:
cfg->fc_dst = nla_get_be32(attr);
break;
case RTA_OIF:
cfg->fc_oif = nla_get_u32(attr);
break;
case RTA_GATEWAY:
cfg->fc_gw = nla_get_be32(attr);
break;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
case RTA_PREFSRC:
cfg->fc_prefsrc = nla_get_be32(attr);
break;
case RTA_METRICS:
cfg->fc_mx = nla_data(attr);
cfg->fc_mx_len = nla_len(attr);
break;
case RTA_MULTIPATH:
cfg->fc_mp = nla_data(attr);
cfg->fc_mp_len = nla_len(attr);
break;
case RTA_FLOW:
cfg->fc_flow = nla_get_u32(attr);
break;
case RTA_TABLE:
cfg->fc_table = nla_get_u32(attr);
break;
case RTA_ENCAP:
cfg->fc_encap = attr;
break;
case RTA_ENCAP_TYPE:
cfg->fc_encap_type = nla_get_u16(attr);
break;
}
}
return 0;
errout:
return err;
}
static int inet_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
struct fib_table *tb;
int err;
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
tb = fib_get_table(net, cfg.fc_table);
if (!tb) {
err = -ESRCH;
goto errout;
}
err = fib_table_delete(tb, &cfg);
errout:
return err;
}
static int inet_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct fib_config cfg;
struct fib_table *tb;
int err;
err = rtm_to_fib_config(net, skb, nlh, &cfg);
if (err < 0)
goto errout;
tb = fib_new_table(net, cfg.fc_table);
if (!tb) {
err = -ENOBUFS;
goto errout;
}
err = fib_table_insert(tb, &cfg);
errout:
return err;
}
static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
unsigned int h, s_h;
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
int dumped = 0;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
return skb->len;
s_h = cb->args[0];
s_e = cb->args[1];
rcu_read_lock();
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
e = 0;
head = &net->ipv4.fib_table_hash[h];
hlist_for_each_entry_rcu(tb, head, tb_hlist) {
if (e < s_e)
goto next;
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
if (fib_table_dump(tb, skb, cb) < 0)
goto out;
dumped = 1;
next:
e++;
}
}
out:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
return skb->len;
}
/* Prepare and feed an intra-kernel routing request.
* Really, it should be a netlink message, but :-( netlink
* may not be configured, so we feed it directly
* to the fib engine. This is legal because all such events occur
* only while netlink is already locked.
*/
static void fib_magic(int cmd, int type, __be32 dst, int dst_len, struct in_ifaddr *ifa)
{
struct net *net = dev_net(ifa->ifa_dev->dev);
u32 tb_id = l3mdev_fib_table(ifa->ifa_dev->dev);
struct fib_table *tb;
struct fib_config cfg = {
.fc_protocol = RTPROT_KERNEL,
.fc_type = type,
.fc_dst = dst,
.fc_dst_len = dst_len,
.fc_prefsrc = ifa->ifa_local,
.fc_oif = ifa->ifa_dev->dev->ifindex,
.fc_nlflags = NLM_F_CREATE | NLM_F_APPEND,
.fc_nlinfo = {
.nl_net = net,
},
};
if (!tb_id)
tb_id = (type == RTN_UNICAST) ? RT_TABLE_MAIN : RT_TABLE_LOCAL;
tb = fib_new_table(net, tb_id);
if (!tb)
return;
cfg.fc_table = tb->tb_id;
if (type != RTN_LOCAL)
cfg.fc_scope = RT_SCOPE_LINK;
else
cfg.fc_scope = RT_SCOPE_HOST;
if (cmd == RTM_NEWROUTE)
fib_table_insert(tb, &cfg);
else
fib_table_delete(tb, &cfg);
}
void fib_add_ifaddr(struct in_ifaddr *ifa)
{
struct in_device *in_dev = ifa->ifa_dev;
struct net_device *dev = in_dev->dev;
struct in_ifaddr *prim = ifa;
__be32 mask = ifa->ifa_mask;
__be32 addr = ifa->ifa_local;
__be32 prefix = ifa->ifa_address & mask;
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, prefix, mask);
if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
}
fib_magic(RTM_NEWROUTE, RTN_LOCAL, addr, 32, prim);
if (!(dev->flags & IFF_UP))
return;
/* Add broadcast address, if it is explicitly assigned. */
if (ifa->ifa_broadcast && ifa->ifa_broadcast != htonl(0xFFFFFFFF))
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
if (!ipv4_is_zeronet(prefix) && !(ifa->ifa_flags & IFA_F_SECONDARY) &&
(prefix != addr || ifa->ifa_prefixlen < 32)) {
if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
fib_magic(RTM_NEWROUTE,
dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
prefix, ifa->ifa_prefixlen, prim);
/* Add network-specific broadcasts, when it makes sense */
if (ifa->ifa_prefixlen < 31) {
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix, 32, prim);
fib_magic(RTM_NEWROUTE, RTN_BROADCAST, prefix | ~mask,
32, prim);
}
}
}
/* Delete primary or secondary address.
* Optionally, on secondary address promotion consider the addresses
* from subnet iprim as deleted, even if they are in device list.
* In this case the secondary ifa can be in device list.
*/
void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
{
struct in_device *in_dev = ifa->ifa_dev;
struct net_device *dev = in_dev->dev;
struct in_ifaddr *ifa1;
struct in_ifaddr *prim = ifa, *prim1 = NULL;
__be32 brd = ifa->ifa_address | ~ifa->ifa_mask;
__be32 any = ifa->ifa_address & ifa->ifa_mask;
#define LOCAL_OK 1
#define BRD_OK 2
#define BRD0_OK 4
#define BRD1_OK 8
unsigned int ok = 0;
int subnet = 0; /* Primary network */
int gone = 1; /* Address is missing */
int same_prefsrc = 0; /* Another primary with same IP */
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (!prim) {
pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
pr_warn("%s: bug: iprim != prim\n", __func__);
return;
}
} else if (!ipv4_is_zeronet(any) &&
(any != ifa->ifa_local || ifa->ifa_prefixlen < 32)) {
if (!(ifa->ifa_flags & IFA_F_NOPREFIXROUTE))
fib_magic(RTM_DELROUTE,
dev->flags & IFF_LOOPBACK ? RTN_LOCAL : RTN_UNICAST,
any, ifa->ifa_prefixlen, prim);
subnet = 1;
}
if (in_dev->dead)
goto no_promotions;
/* Deletion is more complicated than addition.
* We must take care not to delete too much :-)
*
* Scan the address list to be sure that addresses are really gone.
*/
for (ifa1 = in_dev->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
if (ifa1 == ifa) {
/* promotion, keep the IP */
gone = 0;
continue;
}
/* Ignore IFAs from our subnet */
if (iprim && ifa1->ifa_mask == iprim->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, iprim))
continue;
/* Ignore ifa1 if it uses different primary IP (prefsrc) */
if (ifa1->ifa_flags & IFA_F_SECONDARY) {
/* Another address from our subnet? */
if (ifa1->ifa_mask == prim->ifa_mask &&
inet_ifa_match(ifa1->ifa_address, prim))
prim1 = prim;
else {
/* We reached the secondaries, so
* same_prefsrc should be determined.
*/
if (!same_prefsrc)
continue;
/* Search new prim1 if ifa1 is not
* using the current prim1
*/
if (!prim1 ||
ifa1->ifa_mask != prim1->ifa_mask ||
!inet_ifa_match(ifa1->ifa_address, prim1))
prim1 = inet_ifa_byprefix(in_dev,
ifa1->ifa_address,
ifa1->ifa_mask);
if (!prim1)
continue;
if (prim1->ifa_local != prim->ifa_local)
continue;
}
} else {
if (prim->ifa_local != ifa1->ifa_local)
continue;
prim1 = ifa1;
if (prim != prim1)
same_prefsrc = 1;
}
if (ifa->ifa_local == ifa1->ifa_local)
ok |= LOCAL_OK;
if (ifa->ifa_broadcast == ifa1->ifa_broadcast)
ok |= BRD_OK;
if (brd == ifa1->ifa_broadcast)
ok |= BRD1_OK;
if (any == ifa1->ifa_broadcast)
ok |= BRD0_OK;
/* primary has network specific broadcasts */
if (prim1 == ifa1 && ifa1->ifa_prefixlen < 31) {
__be32 brd1 = ifa1->ifa_address | ~ifa1->ifa_mask;
__be32 any1 = ifa1->ifa_address & ifa1->ifa_mask;
if (!ipv4_is_zeronet(any1)) {
if (ifa->ifa_broadcast == brd1 ||
ifa->ifa_broadcast == any1)
ok |= BRD_OK;
if (brd == brd1 || brd == any1)
ok |= BRD1_OK;
if (any == brd1 || any == any1)
ok |= BRD0_OK;
}
}
}
no_promotions:
if (!(ok & BRD_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, ifa->ifa_broadcast, 32, prim);
if (subnet && ifa->ifa_prefixlen < 31) {
if (!(ok & BRD1_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, brd, 32, prim);
if (!(ok & BRD0_OK))
fib_magic(RTM_DELROUTE, RTN_BROADCAST, any, 32, prim);
}
if (!(ok & LOCAL_OK)) {
unsigned int addr_type;
fib_magic(RTM_DELROUTE, RTN_LOCAL, ifa->ifa_local, 32, prim);
/* Check, that this local address finally disappeared. */
addr_type = inet_addr_type_dev_table(dev_net(dev), dev,
ifa->ifa_local);
if (gone && addr_type != RTN_LOCAL) {
/* And the last, but not the least thing.
* We must flush stray FIB entries.
*
* First of all, we scan fib_info list searching
* for stray nexthop entries, then ignite fib_flush.
*/
if (fib_sync_down_addr(dev_net(dev), ifa->ifa_local))
fib_flush(dev_net(dev));
}
}
#undef LOCAL_OK
#undef BRD_OK
#undef BRD0_OK
#undef BRD1_OK
}
static void nl_fib_lookup(struct net *net, struct fib_result_nl *frn)
{
struct fib_result res;
struct flowi4 fl4 = {
.flowi4_mark = frn->fl_mark,
.daddr = frn->fl_addr,
.flowi4_tos = frn->fl_tos,
.flowi4_scope = frn->fl_scope,
};
struct fib_table *tb;
rcu_read_lock();
tb = fib_get_table(net, frn->tb_id_in);
frn->err = -ENOENT;
if (tb) {
local_bh_disable();
frn->tb_id = tb->tb_id;
frn->err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
if (!frn->err) {
frn->prefixlen = res.prefixlen;
frn->nh_sel = res.nh_sel;
frn->type = res.type;
frn->scope = res.scope;
}
local_bh_enable();
}
rcu_read_unlock();
}
static void nl_fib_input(struct sk_buff *skb)
{
struct net *net;
struct fib_result_nl *frn;
struct nlmsghdr *nlh;
u32 portid;
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
skb = netlink_skb_clone(skb, GFP_KERNEL);
if (!skb)
return;
nlh = nlmsg_hdr(skb);
frn = (struct fib_result_nl *) nlmsg_data(nlh);
nl_fib_lookup(net, frn);
portid = NETLINK_CB(skb).portid; /* netlink portid */
NETLINK_CB(skb).portid = 0; /* from kernel */
NETLINK_CB(skb).dst_group = 0; /* unicast */
netlink_unicast(net->ipv4.fibnl, skb, portid, MSG_DONTWAIT);
}
static int __net_init nl_fib_lookup_init(struct net *net)
{
struct sock *sk;
struct netlink_kernel_cfg cfg = {
.input = nl_fib_input,
};
sk = netlink_kernel_create(net, NETLINK_FIB_LOOKUP, &cfg);
if (!sk)
return -EAFNOSUPPORT;
net->ipv4.fibnl = sk;
return 0;
}
static void nl_fib_lookup_exit(struct net *net)
{
netlink_kernel_release(net->ipv4.fibnl);
net->ipv4.fibnl = NULL;
}
static void fib_disable_ip(struct net_device *dev, unsigned long event,
bool force)
{
if (fib_sync_down_dev(dev, event, force))
fib_flush(dev_net(dev));
rt_cache_flush(dev_net(dev));
arp_ifdown(dev);
}
static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct net_device *dev = ifa->ifa_dev->dev;
struct net *net = dev_net(dev);
switch (event) {
case NETDEV_UP:
fib_add_ifaddr(ifa);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
rt_cache_flush(dev_net(dev));
break;
case NETDEV_DOWN:
fib_del_ifaddr(ifa, NULL);
atomic_inc(&net->ipv4.dev_addr_genid);
if (!ifa->ifa_dev->ifa_list) {
/* Last address was deleted from this interface.
* Disable IP.
*/
fib_disable_ip(dev, event, true);
} else {
rt_cache_flush(dev_net(dev));
}
break;
}
return NOTIFY_DONE;
}
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct netdev_notifier_changeupper_info *info;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
if (event == NETDEV_UNREGISTER) {
fib_disable_ip(dev, event, true);
rt_flush_dev(dev);
return NOTIFY_DONE;
}
in_dev = __in_dev_get_rtnl(dev);
if (!in_dev)
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
for_ifa(in_dev) {
fib_add_ifaddr(ifa);
} endfor_ifa(in_dev);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
fib_sync_up(dev, RTNH_F_DEAD);
#endif
atomic_inc(&net->ipv4.dev_addr_genid);
rt_cache_flush(net);
break;
case NETDEV_DOWN:
fib_disable_ip(dev, event, false);
break;
case NETDEV_CHANGE:
flags = dev_get_flags(dev);
if (flags & (IFF_RUNNING | IFF_LOWER_UP))
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
/* fall through */
case NETDEV_CHANGEMTU:
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
if (info->upper_dev && netif_is_l3_master(info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block fib_inetaddr_notifier = {
.notifier_call = fib_inetaddr_event,
};
static struct notifier_block fib_netdev_notifier = {
.notifier_call = fib_netdev_event,
};
static int __net_init ip_fib_net_init(struct net *net)
{
int err;
size_t size = sizeof(struct hlist_head) * FIB_TABLE_HASHSZ;
/* Avoid false sharing : Use at least a full cache line */
size = max_t(size_t, size, L1_CACHE_BYTES);
net->ipv4.fib_table_hash = kzalloc(size, GFP_KERNEL);
if (!net->ipv4.fib_table_hash)
return -ENOMEM;
err = fib4_rules_init(net);
if (err < 0)
goto fail;
return 0;
fail:
kfree(net->ipv4.fib_table_hash);
return err;
}
static void ip_fib_net_exit(struct net *net)
{
unsigned int i;
rtnl_lock();
#ifdef CONFIG_IP_MULTIPLE_TABLES
RCU_INIT_POINTER(net->ipv4.fib_local, NULL);
RCU_INIT_POINTER(net->ipv4.fib_main, NULL);
RCU_INIT_POINTER(net->ipv4.fib_default, NULL);
#endif
for (i = 0; i < FIB_TABLE_HASHSZ; i++) {
struct hlist_head *head = &net->ipv4.fib_table_hash[i];
struct hlist_node *tmp;
struct fib_table *tb;
hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
hlist_del(&tb->tb_hlist);
fib_table_flush(tb);
fib_free_table(tb);
}
}
#ifdef CONFIG_IP_MULTIPLE_TABLES
fib4_rules_exit(net);
#endif
rtnl_unlock();
kfree(net->ipv4.fib_table_hash);
}
static int __net_init fib_net_init(struct net *net)
{
int error;
#ifdef CONFIG_IP_ROUTE_CLASSID
net->ipv4.fib_num_tclassid_users = 0;
#endif
error = ip_fib_net_init(net);
if (error < 0)
goto out;
error = nl_fib_lookup_init(net);
if (error < 0)
goto out_nlfl;
error = fib_proc_init(net);
if (error < 0)
goto out_proc;
out:
return error;
out_proc:
nl_fib_lookup_exit(net);
out_nlfl:
ip_fib_net_exit(net);
goto out;
}
static void __net_exit fib_net_exit(struct net *net)
{
fib_proc_exit(net);
nl_fib_lookup_exit(net);
ip_fib_net_exit(net);
}
static struct pernet_operations fib_net_ops = {
.init = fib_net_init,
.exit = fib_net_exit,
};
void __init ip_fib_init(void)
{
rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
register_pernet_subsys(&fib_net_ops);
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
fib_trie_init();
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5006_1 |
crossvul-cpp_data_good_3432_3 | /*
* reflection.c: Routines for creating an image at runtime.
*
* Author:
* Paolo Molaro (lupus@ximian.com)
*
* Copyright 2001-2003 Ximian, Inc (http://www.ximian.com)
* Copyright 2004-2009 Novell, Inc (http://www.novell.com)
*
*/
#include <config.h>
#include "mono/utils/mono-digest.h"
#include "mono/utils/mono-membar.h"
#include "mono/metadata/reflection.h"
#include "mono/metadata/tabledefs.h"
#include "mono/metadata/metadata-internals.h"
#include <mono/metadata/profiler-private.h>
#include "mono/metadata/class-internals.h"
#include "mono/metadata/gc-internal.h"
#include "mono/metadata/tokentype.h"
#include "mono/metadata/domain-internals.h"
#include "mono/metadata/opcodes.h"
#include "mono/metadata/assembly.h"
#include "mono/metadata/object-internals.h"
#include <mono/metadata/exception.h>
#include <mono/metadata/marshal.h>
#include <mono/metadata/security-manager.h>
#include <stdio.h>
#include <glib.h>
#include <errno.h>
#include <time.h>
#include <string.h>
#include <ctype.h>
#include "image.h"
#include "cil-coff.h"
#include "mono-endian.h"
#include <mono/metadata/gc-internal.h>
#include <mono/metadata/mempool-internals.h>
#include <mono/metadata/security-core-clr.h>
#include <mono/metadata/debug-helpers.h>
#include <mono/metadata/verify-internals.h>
#include <mono/metadata/mono-ptr-array.h>
#include <mono/utils/mono-string.h>
#include <mono/utils/mono-error-internals.h>
static gboolean is_usertype (MonoReflectionType *ref);
static MonoReflectionType *mono_reflection_type_resolve_user_types (MonoReflectionType *type);
typedef struct {
char *p;
char *buf;
char *end;
} SigBuffer;
#define TEXT_OFFSET 512
#define CLI_H_SIZE 136
#define FILE_ALIGN 512
#define VIRT_ALIGN 8192
#define START_TEXT_RVA 0x00002000
typedef struct {
MonoReflectionILGen *ilgen;
MonoReflectionType *rtype;
MonoArray *parameters;
MonoArray *generic_params;
MonoGenericContainer *generic_container;
MonoArray *pinfo;
MonoArray *opt_types;
guint32 attrs;
guint32 iattrs;
guint32 call_conv;
guint32 *table_idx; /* note: it's a pointer */
MonoArray *code;
MonoObject *type;
MonoString *name;
MonoBoolean init_locals;
MonoBoolean skip_visibility;
MonoArray *return_modreq;
MonoArray *return_modopt;
MonoArray *param_modreq;
MonoArray *param_modopt;
MonoArray *permissions;
MonoMethod *mhandle;
guint32 nrefs;
gpointer *refs;
/* for PInvoke */
int charset, extra_flags, native_cc;
MonoString *dll, *dllentry;
} ReflectionMethodBuilder;
typedef struct {
guint32 owner;
MonoReflectionGenericParam *gparam;
} GenericParamTableEntry;
const unsigned char table_sizes [MONO_TABLE_NUM] = {
MONO_MODULE_SIZE,
MONO_TYPEREF_SIZE,
MONO_TYPEDEF_SIZE,
0,
MONO_FIELD_SIZE,
0,
MONO_METHOD_SIZE,
0,
MONO_PARAM_SIZE,
MONO_INTERFACEIMPL_SIZE,
MONO_MEMBERREF_SIZE, /* 0x0A */
MONO_CONSTANT_SIZE,
MONO_CUSTOM_ATTR_SIZE,
MONO_FIELD_MARSHAL_SIZE,
MONO_DECL_SECURITY_SIZE,
MONO_CLASS_LAYOUT_SIZE,
MONO_FIELD_LAYOUT_SIZE, /* 0x10 */
MONO_STAND_ALONE_SIGNATURE_SIZE,
MONO_EVENT_MAP_SIZE,
0,
MONO_EVENT_SIZE,
MONO_PROPERTY_MAP_SIZE,
0,
MONO_PROPERTY_SIZE,
MONO_METHOD_SEMA_SIZE,
MONO_METHODIMPL_SIZE,
MONO_MODULEREF_SIZE, /* 0x1A */
MONO_TYPESPEC_SIZE,
MONO_IMPLMAP_SIZE,
MONO_FIELD_RVA_SIZE,
0,
0,
MONO_ASSEMBLY_SIZE, /* 0x20 */
MONO_ASSEMBLY_PROCESSOR_SIZE,
MONO_ASSEMBLYOS_SIZE,
MONO_ASSEMBLYREF_SIZE,
MONO_ASSEMBLYREFPROC_SIZE,
MONO_ASSEMBLYREFOS_SIZE,
MONO_FILE_SIZE,
MONO_EXP_TYPE_SIZE,
MONO_MANIFEST_SIZE,
MONO_NESTED_CLASS_SIZE,
MONO_GENERICPARAM_SIZE, /* 0x2A */
MONO_METHODSPEC_SIZE,
MONO_GENPARCONSTRAINT_SIZE
};
#ifndef DISABLE_REFLECTION_EMIT
static guint32 mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec);
static guint32 mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_open_instance);
static guint32 mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *cb);
static guint32 mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper);
static void ensure_runtime_vtable (MonoClass *klass);
static gpointer resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context);
static guint32 mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method);
static guint32 encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context);
static gpointer register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly);
static void reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb);
static void reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb);
static guint32 create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb);
#endif
static guint32 mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type);
static guint32 mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec);
static void mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly);
static guint32 encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo);
static guint32 encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type);
static char* type_get_qualified_name (MonoType *type, MonoAssembly *ass);
static void encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf);
static void get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types);
static MonoReflectionType *mono_reflection_type_get_underlying_system_type (MonoReflectionType* t);
static MonoType* mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve);
static MonoReflectionType* mono_reflection_type_resolve_user_types (MonoReflectionType *type);
static gboolean is_sre_array (MonoClass *class);
static gboolean is_sre_byref (MonoClass *class);
static gboolean is_sre_pointer (MonoClass *class);
static gboolean is_sre_type_builder (MonoClass *class);
static gboolean is_sre_method_builder (MonoClass *class);
static gboolean is_sre_ctor_builder (MonoClass *class);
static gboolean is_sre_field_builder (MonoClass *class);
static gboolean is_sr_mono_method (MonoClass *class);
static gboolean is_sr_mono_cmethod (MonoClass *class);
static gboolean is_sr_mono_generic_method (MonoClass *class);
static gboolean is_sr_mono_generic_cmethod (MonoClass *class);
static gboolean is_sr_mono_field (MonoClass *class);
static gboolean is_sr_mono_property (MonoClass *class);
static gboolean is_sre_method_on_tb_inst (MonoClass *class);
static gboolean is_sre_ctor_on_tb_inst (MonoClass *class);
static guint32 mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method);
static guint32 mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m);
static MonoMethod * inflate_method (MonoReflectionType *type, MonoObject *obj);
static guint32 create_typespec (MonoDynamicImage *assembly, MonoType *type);
static void init_type_builder_generics (MonoObject *type);
#define RESOLVE_TYPE(type) do { type = (void*)mono_reflection_type_resolve_user_types ((MonoReflectionType*)type); } while (0)
#define RESOLVE_ARRAY_TYPE_ELEMENT(array, index) do { \
MonoReflectionType *__type = mono_array_get (array, MonoReflectionType*, index); \
__type = mono_reflection_type_resolve_user_types (__type); \
mono_array_set (arr, MonoReflectionType*, index, __type); \
} while (0)
#define mono_type_array_get_and_resolve(array, index) mono_reflection_type_get_handle ((MonoReflectionType*)mono_array_get (array, gpointer, index))
void
mono_reflection_init (void)
{
}
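/*
 * SigBuffer is a small growable byte buffer used to assemble metadata
 * signature blobs before interning them in the #Blob heap. The typical
 * pattern in this file is:
 *
 *   SigBuffer buf;
 *   sigbuffer_init (&buf, 32);
 *   sigbuffer_add_value (&buf, 0x06);   (0x06 = FIELD calling convention)
 *   encode_type (assembly, type, &buf);
 *   idx = sigbuffer_add_to_blob_cached (assembly, &buf);
 *   sigbuffer_free (&buf);
 */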
static void
sigbuffer_init (SigBuffer *buf, int size)
{
buf->buf = g_malloc (size);
buf->p = buf->buf;
buf->end = buf->buf + size;
}
static void
sigbuffer_make_room (SigBuffer *buf, int size)
{
if (buf->end - buf->p < size) {
int new_size = buf->end - buf->buf + size + 32;
char *p = g_realloc (buf->buf, new_size);
size = buf->p - buf->buf;
buf->buf = p;
buf->p = p + size;
buf->end = buf->buf + new_size;
}
}
static void
sigbuffer_add_value (SigBuffer *buf, guint32 val)
{
sigbuffer_make_room (buf, 6);
mono_metadata_encode_value (val, buf->p, &buf->p);
}
static void
sigbuffer_add_byte (SigBuffer *buf, guint8 val)
{
sigbuffer_make_room (buf, 1);
buf->p [0] = val;
buf->p++;
}
static void
sigbuffer_add_mem (SigBuffer *buf, char *p, guint32 size)
{
sigbuffer_make_room (buf, size);
memcpy (buf->p, p, size);
buf->p += size;
}
static void
sigbuffer_free (SigBuffer *buf)
{
g_free (buf->buf);
}
#ifndef DISABLE_REFLECTION_EMIT
/**
* image_g_malloc:
*
* Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory
* from the C heap.
*/
static gpointer
image_g_malloc (MonoImage *image, guint size)
{
if (image)
return mono_image_alloc (image, size);
else
return g_malloc (size);
}
#endif /* !DISABLE_REFLECTION_EMIT */
/**
* image_g_malloc0:
*
* Allocate memory from the @image mempool if it is non-NULL. Otherwise, allocate memory
* from the C heap.
*/
static gpointer
image_g_malloc0 (MonoImage *image, guint size)
{
if (image)
return mono_image_alloc0 (image, size);
else
return g_malloc0 (size);
}
#ifndef DISABLE_REFLECTION_EMIT
static char*
image_strdup (MonoImage *image, const char *s)
{
if (image)
return mono_image_strdup (image, s);
else
return g_strdup (s);
}
#endif
#define image_g_new(image,struct_type, n_structs) \
((struct_type *) image_g_malloc (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))
#define image_g_new0(image,struct_type, n_structs) \
((struct_type *) image_g_malloc0 (image, ((gsize) sizeof (struct_type)) * ((gsize) (n_structs))))
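/*
 * Make sure TABLE can hold NROWS rows, growing the backing array
 * geometrically. Rows are addressed 1-based throughout this file,
 * hence the nrows + 1 in the capacity check.
 */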
static void
alloc_table (MonoDynamicTable *table, guint nrows)
{
table->rows = nrows;
g_assert (table->columns);
if (nrows + 1 >= table->alloc_rows) {
while (nrows + 1 >= table->alloc_rows) {
if (table->alloc_rows == 0)
table->alloc_rows = 16;
else
table->alloc_rows *= 2;
}
table->values = g_renew (guint32, table->values, (table->alloc_rows) * table->columns);
}
}
static void
make_room_in_stream (MonoDynamicStream *stream, int size)
{
if (size <= stream->alloc_size)
return;
while (stream->alloc_size <= size) {
if (stream->alloc_size < 4096)
stream->alloc_size = 4096;
else
stream->alloc_size *= 2;
}
stream->data = g_realloc (stream->data, stream->alloc_size);
}
static guint32
string_heap_insert (MonoDynamicStream *sh, const char *str)
{
guint32 idx;
guint32 len;
gpointer oldkey, oldval;
if (g_hash_table_lookup_extended (sh->hash, str, &oldkey, &oldval))
return GPOINTER_TO_UINT (oldval);
len = strlen (str) + 1;
idx = sh->index;
make_room_in_stream (sh, idx + len);
/*
* We strdup the string even though we already copy it into sh->data
* so that the string pointers in the hash remain valid even if
* we need to realloc sh->data. We may want to avoid that later.
*/
g_hash_table_insert (sh->hash, g_strdup (str), GUINT_TO_POINTER (idx));
memcpy (sh->data + idx, str, len);
sh->index += len;
return idx;
}
static guint32
string_heap_insert_mstring (MonoDynamicStream *sh, MonoString *str)
{
char *name = mono_string_to_utf8 (str);
guint32 idx;
idx = string_heap_insert (sh, name);
g_free (name);
return idx;
}
#ifndef DISABLE_REFLECTION_EMIT
static void
string_heap_init (MonoDynamicStream *sh)
{
sh->index = 0;
sh->alloc_size = 4096;
sh->data = g_malloc (4096);
sh->hash = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, NULL);
string_heap_insert (sh, "");
}
#endif
static guint32
mono_image_add_stream_data (MonoDynamicStream *stream, const char *data, guint32 len)
{
guint32 idx;
make_room_in_stream (stream, stream->index + len);
memcpy (stream->data + stream->index, data, len);
idx = stream->index;
stream->index += len;
/*
* align index? Not without adding an additional param that controls it since
* we may store a blob value in pieces.
*/
return idx;
}
static guint32
mono_image_add_stream_zero (MonoDynamicStream *stream, guint32 len)
{
guint32 idx;
make_room_in_stream (stream, stream->index + len);
memset (stream->data + stream->index, 0, len);
idx = stream->index;
stream->index += len;
return idx;
}
static void
stream_data_align (MonoDynamicStream *stream)
{
char buf [4] = {0};
guint32 count = stream->index % 4;
/* we assume the stream data will be aligned */
if (count)
mono_image_add_stream_data (stream, buf, 4 - count);
}
#ifndef DISABLE_REFLECTION_EMIT
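/*
 * Hash and equality callbacks for the blob cache: entries are
 * length-prefixed blobs, so both decode the compressed length header
 * first and then operate on the payload bytes.
 */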
static int
mono_blob_entry_hash (const char* str)
{
guint len, h;
const char *end;
len = mono_metadata_decode_blob_size (str, &str);
if (len > 0) {
end = str + len;
h = *str;
for (str += 1; str < end; str++)
h = (h << 5) - h + *str;
return h;
} else {
return 0;
}
}
static gboolean
mono_blob_entry_equal (const char *str1, const char *str2) {
int len, len2;
const char *end1;
const char *end2;
len = mono_metadata_decode_blob_size (str1, &end1);
len2 = mono_metadata_decode_blob_size (str2, &end2);
if (len != len2)
return 0;
return memcmp (end1, end2, len) == 0;
}
#endif
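/*
 * Intern the concatenation of B1 (normally the encoded length prefix)
 * and B2 (the payload) in the #Blob heap, returning the offset of an
 * identical blob if one was already added.
 */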
static guint32
add_to_blob_cached (MonoDynamicImage *assembly, char *b1, int s1, char *b2, int s2)
{
guint32 idx;
char *copy;
gpointer oldkey, oldval;
copy = g_malloc (s1+s2);
memcpy (copy, b1, s1);
memcpy (copy + s1, b2, s2);
if (g_hash_table_lookup_extended (assembly->blob_cache, copy, &oldkey, &oldval)) {
g_free (copy);
idx = GPOINTER_TO_UINT (oldval);
} else {
idx = mono_image_add_stream_data (&assembly->blob, b1, s1);
mono_image_add_stream_data (&assembly->blob, b2, s2);
g_hash_table_insert (assembly->blob_cache, copy, GUINT_TO_POINTER (idx));
}
return idx;
}
static guint32
sigbuffer_add_to_blob_cached (MonoDynamicImage *assembly, SigBuffer *buf)
{
char blob_size [8];
char *b = blob_size;
guint32 size = buf->p - buf->buf;
/* store length */
g_assert (size <= (buf->end - buf->buf));
mono_metadata_encode_value (size, b, &b);
return add_to_blob_cached (assembly, blob_size, b-blob_size, buf->buf, size);
}
/*
* Copy len * nelem bytes from val to dest, swapping bytes to LE if necessary.
* dest may be misaligned.
*/
static void
swap_with_size (char *dest, const char* val, int len, int nelem) {
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
int elem;
for (elem = 0; elem < nelem; ++elem) {
switch (len) {
case 1:
*dest = *val;
break;
case 2:
dest [0] = val [1];
dest [1] = val [0];
break;
case 4:
dest [0] = val [3];
dest [1] = val [2];
dest [2] = val [1];
dest [3] = val [0];
break;
case 8:
dest [0] = val [7];
dest [1] = val [6];
dest [2] = val [5];
dest [3] = val [4];
dest [4] = val [3];
dest [5] = val [2];
dest [6] = val [1];
dest [7] = val [0];
break;
default:
g_assert_not_reached ();
}
dest += len;
val += len;
}
#else
memcpy (dest, val, len * nelem);
#endif
}
static guint32
add_mono_string_to_blob_cached (MonoDynamicImage *assembly, MonoString *str)
{
char blob_size [64];
char *b = blob_size;
guint32 idx = 0, len;
len = str->length * 2;
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
g_free (swapped);
}
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
return idx;
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoClass *
default_class_from_mono_type (MonoType *type)
{
switch (type->type) {
case MONO_TYPE_OBJECT:
return mono_defaults.object_class;
case MONO_TYPE_VOID:
return mono_defaults.void_class;
case MONO_TYPE_BOOLEAN:
return mono_defaults.boolean_class;
case MONO_TYPE_CHAR:
return mono_defaults.char_class;
case MONO_TYPE_I1:
return mono_defaults.sbyte_class;
case MONO_TYPE_U1:
return mono_defaults.byte_class;
case MONO_TYPE_I2:
return mono_defaults.int16_class;
case MONO_TYPE_U2:
return mono_defaults.uint16_class;
case MONO_TYPE_I4:
return mono_defaults.int32_class;
case MONO_TYPE_U4:
return mono_defaults.uint32_class;
case MONO_TYPE_I:
return mono_defaults.int_class;
case MONO_TYPE_U:
return mono_defaults.uint_class;
case MONO_TYPE_I8:
return mono_defaults.int64_class;
case MONO_TYPE_U8:
return mono_defaults.uint64_class;
case MONO_TYPE_R4:
return mono_defaults.single_class;
case MONO_TYPE_R8:
return mono_defaults.double_class;
case MONO_TYPE_STRING:
return mono_defaults.string_class;
default:
g_warning ("default_class_from_mono_type: implement me 0x%02x\n", type->type);
g_assert_not_reached ();
}
return NULL;
}
#endif
/*
* mono_class_get_ref_info:
*
* Return the type builder/generic param builder corresponding to KLASS, if it exists.
*/
gpointer
mono_class_get_ref_info (MonoClass *klass)
{
if (klass->ref_info_handle == 0)
return NULL;
else
return mono_gchandle_get_target (klass->ref_info_handle);
}
void
mono_class_set_ref_info (MonoClass *klass, gpointer obj)
{
klass->ref_info_handle = mono_gchandle_new ((MonoObject*)obj, FALSE);
g_assert (klass->ref_info_handle != 0);
}
void
mono_class_free_ref_info (MonoClass *klass)
{
if (klass->ref_info_handle) {
mono_gchandle_free (klass->ref_info_handle);
klass->ref_info_handle = 0;
}
}
static void
encode_generic_class (MonoDynamicImage *assembly, MonoGenericClass *gclass, SigBuffer *buf)
{
int i;
MonoGenericInst *class_inst;
MonoClass *klass;
g_assert (gclass);
class_inst = gclass->context.class_inst;
sigbuffer_add_value (buf, MONO_TYPE_GENERICINST);
klass = gclass->container_class;
sigbuffer_add_value (buf, klass->byval_arg.type);
sigbuffer_add_value (buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE));
sigbuffer_add_value (buf, class_inst->type_argc);
for (i = 0; i < class_inst->type_argc; ++i)
encode_type (assembly, class_inst->type_argv [i], buf);
}
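/*
 * Recursively encode TYPE into BUF following the ECMA-335 II.23.2
 * signature encoding: an element type byte followed by any nested data
 * (pointee type, array shape, generic arguments, ...).
 */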
static void
encode_type (MonoDynamicImage *assembly, MonoType *type, SigBuffer *buf)
{
if (!type) {
g_assert_not_reached ();
return;
}
if (type->byref)
sigbuffer_add_value (buf, MONO_TYPE_BYREF);
switch (type->type){
case MONO_TYPE_VOID:
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_I1:
case MONO_TYPE_U1:
case MONO_TYPE_I2:
case MONO_TYPE_U2:
case MONO_TYPE_I4:
case MONO_TYPE_U4:
case MONO_TYPE_I8:
case MONO_TYPE_U8:
case MONO_TYPE_R4:
case MONO_TYPE_R8:
case MONO_TYPE_I:
case MONO_TYPE_U:
case MONO_TYPE_STRING:
case MONO_TYPE_OBJECT:
case MONO_TYPE_TYPEDBYREF:
sigbuffer_add_value (buf, type->type);
break;
case MONO_TYPE_PTR:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, type->data.type, buf);
break;
case MONO_TYPE_SZARRAY:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, &type->data.klass->byval_arg, buf);
break;
case MONO_TYPE_VALUETYPE:
case MONO_TYPE_CLASS: {
MonoClass *k = mono_class_from_mono_type (type);
if (k->generic_container) {
MonoGenericClass *gclass = mono_metadata_lookup_generic_class (k, k->generic_container->context.class_inst, TRUE);
encode_generic_class (assembly, gclass, buf);
} else {
/*
* Make sure we use the correct type.
*/
sigbuffer_add_value (buf, k->byval_arg.type);
/*
* ensure only non-byref gets passed to mono_image_typedef_or_ref(),
* otherwise two typerefs could point to the same type, leading to
* verification errors.
*/
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, &k->byval_arg));
}
break;
}
case MONO_TYPE_ARRAY:
sigbuffer_add_value (buf, type->type);
encode_type (assembly, &type->data.array->eklass->byval_arg, buf);
sigbuffer_add_value (buf, type->data.array->rank);
sigbuffer_add_value (buf, 0); /* FIXME: set to 0 for now */
sigbuffer_add_value (buf, 0);
break;
case MONO_TYPE_GENERICINST:
encode_generic_class (assembly, type->data.generic_class, buf);
break;
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
sigbuffer_add_value (buf, type->type);
sigbuffer_add_value (buf, mono_type_get_generic_param_num (type));
break;
default:
g_error ("need to encode type %x", type->type);
}
}
static void
encode_reflection_type (MonoDynamicImage *assembly, MonoReflectionType *type, SigBuffer *buf)
{
if (!type) {
sigbuffer_add_value (buf, MONO_TYPE_VOID);
return;
}
encode_type (assembly, mono_reflection_type_get_handle (type), buf);
}
static void
encode_custom_modifiers (MonoDynamicImage *assembly, MonoArray *modreq, MonoArray *modopt, SigBuffer *buf)
{
int i;
if (modreq) {
for (i = 0; i < mono_array_length (modreq); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modreq, i);
sigbuffer_add_byte (buf, MONO_TYPE_CMOD_REQD);
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
}
}
if (modopt) {
for (i = 0; i < mono_array_length (modopt); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modopt, i);
sigbuffer_add_byte (buf, MONO_TYPE_CMOD_OPT);
sigbuffer_add_value (buf, mono_image_typedef_or_ref (assembly, mod));
}
}
}
#ifndef DISABLE_REFLECTION_EMIT
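/*
 * Encode SIG as a MethodDefSig/MethodRefSig blob: a calling convention
 * byte (with the HASTHIS/GENERIC flags), an optional generic parameter
 * count, the parameter count, the return type and each parameter type.
 */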
static guint32
method_encode_signature (MonoDynamicImage *assembly, MonoMethodSignature *sig)
{
SigBuffer buf;
int i;
guint32 nparams = sig->param_count;
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
/*
* FIXME: vararg, explicit_this, different call_conv values...
*/
idx = sig->call_convention;
if (sig->hasthis)
idx |= 0x20; /* hasthis */
if (sig->generic_param_count)
idx |= 0x10; /* generic */
sigbuffer_add_byte (&buf, idx);
if (sig->generic_param_count)
sigbuffer_add_value (&buf, sig->generic_param_count);
sigbuffer_add_value (&buf, nparams);
encode_type (assembly, sig->ret, &buf);
for (i = 0; i < nparams; ++i) {
if (i == sig->sentinelpos)
sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
encode_type (assembly, sig->params [i], &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
#endif
static guint32
method_builder_encode_signature (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb)
{
/*
* FIXME: reuse code from method_encode_signature().
*/
SigBuffer buf;
int i;
guint32 nparams = mb->parameters ? mono_array_length (mb->parameters): 0;
guint32 ngparams = mb->generic_params ? mono_array_length (mb->generic_params): 0;
guint32 notypes = mb->opt_types ? mono_array_length (mb->opt_types): 0;
guint32 idx;
sigbuffer_init (&buf, 32);
/* LAMESPEC: all the call conv spec is foobared */
idx = mb->call_conv & 0x60; /* has-this, explicit-this */
if (mb->call_conv & 2)
idx |= 0x5; /* vararg */
if (!(mb->attrs & METHOD_ATTRIBUTE_STATIC))
idx |= 0x20; /* hasthis */
if (ngparams)
idx |= 0x10; /* generic */
sigbuffer_add_byte (&buf, idx);
if (ngparams)
sigbuffer_add_value (&buf, ngparams);
sigbuffer_add_value (&buf, nparams + notypes);
encode_custom_modifiers (assembly, mb->return_modreq, mb->return_modopt, &buf);
encode_reflection_type (assembly, mb->rtype, &buf);
for (i = 0; i < nparams; ++i) {
MonoArray *modreq = NULL;
MonoArray *modopt = NULL;
MonoReflectionType *pt;
if (mb->param_modreq && (i < mono_array_length (mb->param_modreq)))
modreq = mono_array_get (mb->param_modreq, MonoArray*, i);
if (mb->param_modopt && (i < mono_array_length (mb->param_modopt)))
modopt = mono_array_get (mb->param_modopt, MonoArray*, i);
encode_custom_modifiers (assembly, modreq, modopt, &buf);
pt = mono_array_get (mb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
if (notypes)
sigbuffer_add_byte (&buf, MONO_TYPE_SENTINEL);
for (i = 0; i < notypes; ++i) {
MonoReflectionType *pt;
pt = mono_array_get (mb->opt_types, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
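/*
 * Build a LocalVarSig (leading byte 0x07) for ILGEN's locals and add a
 * StandAloneSig row pointing at it. Identical signatures are cached so
 * they share a single row.
 */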
static guint32
encode_locals (MonoDynamicImage *assembly, MonoReflectionILGen *ilgen)
{
MonoDynamicTable *table;
guint32 *values;
guint32 idx, sig_idx;
guint nl = mono_array_length (ilgen->locals);
SigBuffer buf;
int i;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x07);
sigbuffer_add_value (&buf, nl);
for (i = 0; i < nl; ++i) {
MonoReflectionLocalBuilder *lb = mono_array_get (ilgen->locals, MonoReflectionLocalBuilder*, i);
if (lb->is_pinned)
sigbuffer_add_value (&buf, MONO_TYPE_PINNED);
encode_reflection_type (assembly, (MonoReflectionType*)lb->type, &buf);
}
sig_idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
if (assembly->standalonesig_cache == NULL)
assembly->standalonesig_cache = g_hash_table_new (NULL, NULL);
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx)));
if (idx)
return idx;
table = &assembly->tables [MONO_TABLE_STANDALONESIG];
idx = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE;
values [MONO_STAND_ALONE_SIGNATURE] = sig_idx;
g_hash_table_insert (assembly->standalonesig_cache, GUINT_TO_POINTER (sig_idx), GUINT_TO_POINTER (idx));
return idx;
}
static guint32
method_count_clauses (MonoReflectionILGen *ilgen)
{
guint32 num_clauses = 0;
int i;
MonoILExceptionInfo *ex_info;
for (i = 0; i < mono_array_length (ilgen->ex_handlers); ++i) {
ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i);
if (ex_info->handlers)
num_clauses += mono_array_length (ex_info->handlers);
else
num_clauses++;
}
return num_clauses;
}
#ifndef DISABLE_REFLECTION_EMIT
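/*
 * Convert the ILGenerator exception handler descriptions into runtime
 * MonoExceptionClause structures. The handler list is walked in
 * reverse, mirroring the on-disk clause ordering that
 * method_encode_code () emits.
 */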
static MonoExceptionClause*
method_encode_clauses (MonoImage *image, MonoDynamicImage *assembly, MonoReflectionILGen *ilgen, guint32 num_clauses)
{
MonoExceptionClause *clauses;
MonoExceptionClause *clause;
MonoILExceptionInfo *ex_info;
MonoILExceptionBlock *ex_block;
guint32 finally_start;
int i, j, clause_index;
clauses = image_g_new0 (image, MonoExceptionClause, num_clauses);
clause_index = 0;
for (i = mono_array_length (ilgen->ex_handlers) - 1; i >= 0; --i) {
ex_info = (MonoILExceptionInfo*)mono_array_addr (ilgen->ex_handlers, MonoILExceptionInfo, i);
finally_start = ex_info->start + ex_info->len;
if (!ex_info->handlers)
continue;
for (j = 0; j < mono_array_length (ex_info->handlers); ++j) {
ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j);
clause = &(clauses [clause_index]);
clause->flags = ex_block->type;
clause->try_offset = ex_info->start;
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY)
clause->try_len = finally_start - ex_info->start;
else
clause->try_len = ex_info->len;
clause->handler_offset = ex_block->start;
clause->handler_len = ex_block->len;
if (ex_block->extype) {
clause->data.catch_class = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype));
} else {
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER)
clause->data.filter_offset = ex_block->filter_offset;
else
clause->data.filter_offset = 0;
}
finally_start = ex_block->start + ex_block->len;
clause_index ++;
}
}
return clauses;
}
#endif /* !DISABLE_REFLECTION_EMIT */
static guint32
method_encode_code (MonoDynamicImage *assembly, ReflectionMethodBuilder *mb)
{
char flags = 0;
guint32 idx;
guint32 code_size;
gint32 max_stack, i;
gint32 num_locals = 0;
gint32 num_exception = 0;
gint maybe_small;
guint32 fat_flags;
char fat_header [12];
guint32 int_value;
guint16 short_value;
guint32 local_sig = 0;
guint32 header_size = 12;
MonoArray *code;
if ((mb->attrs & (METHOD_ATTRIBUTE_PINVOKE_IMPL | METHOD_ATTRIBUTE_ABSTRACT)) ||
(mb->iattrs & (METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL | METHOD_IMPL_ATTRIBUTE_RUNTIME)))
return 0;
/*if (mb->name)
g_print ("Encode method %s\n", mono_string_to_utf8 (mb->name));*/
if (mb->ilgen) {
code = mb->ilgen->code;
code_size = mb->ilgen->code_len;
max_stack = mb->ilgen->max_stack;
num_locals = mb->ilgen->locals ? mono_array_length (mb->ilgen->locals) : 0;
if (mb->ilgen->ex_handlers)
num_exception = method_count_clauses (mb->ilgen);
} else {
code = mb->code;
if (code == NULL){
char *name = mono_string_to_utf8 (mb->name);
char *str = g_strdup_printf ("Method %s does not have any IL associated", name);
MonoException *exception = mono_get_exception_argument (NULL, str);
g_free (str);
g_free (name);
mono_raise_exception (exception);
}
code_size = mono_array_length (code);
max_stack = 8; /* we probably need to run a verifier on the code... */
}
stream_data_align (&assembly->code);
/* check for exceptions, maxstack, locals */
maybe_small = (max_stack <= 8) && (!num_locals) && (!num_exception);
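/*
 * A tiny method header is a single byte: flags 0x2 in the low two bits
 * and the code size in the upper six, so it only applies when there
 * are no locals, no exception clauses and max_stack <= 8.
 */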
if (maybe_small) {
if (code_size < 64 && !(code_size & 1)) {
flags = (code_size << 2) | 0x2;
} else if (code_size < 32 && (code_size & 1)) {
flags = (code_size << 2) | 0x6; /* LAMESPEC: see metadata.c */
} else {
goto fat_header;
}
idx = mono_image_add_stream_data (&assembly->code, &flags, 1);
/* add to the fixup todo list */
if (mb->ilgen && mb->ilgen->num_token_fixups)
mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 1));
mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size);
return assembly->text_rva + idx;
}
fat_header:
if (num_locals)
local_sig = MONO_TOKEN_SIGNATURE | encode_locals (assembly, mb->ilgen);
/*
* FIXME: need to set also the header size in fat_flags.
* (and more sects and init locals flags)
*/
fat_flags = 0x03;
if (num_exception)
fat_flags |= METHOD_HEADER_MORE_SECTS;
if (mb->init_locals)
fat_flags |= METHOD_HEADER_INIT_LOCALS;
fat_header [0] = fat_flags;
fat_header [1] = (header_size / 4 ) << 4;
short_value = GUINT16_TO_LE (max_stack);
memcpy (fat_header + 2, &short_value, 2);
int_value = GUINT32_TO_LE (code_size);
memcpy (fat_header + 4, &int_value, 4);
int_value = GUINT32_TO_LE (local_sig);
memcpy (fat_header + 8, &int_value, 4);
idx = mono_image_add_stream_data (&assembly->code, fat_header, 12);
/* add to the fixup todo list */
if (mb->ilgen && mb->ilgen->num_token_fixups)
mono_g_hash_table_insert (assembly->token_fixups, mb->ilgen, GUINT_TO_POINTER (idx + 12));
mono_image_add_stream_data (&assembly->code, mono_array_addr (code, char, 0), code_size);
if (num_exception) {
unsigned char sheader [4];
MonoILExceptionInfo * ex_info;
MonoILExceptionBlock * ex_block;
int j;
stream_data_align (&assembly->code);
/* always use fat format for now */
sheader [0] = METHOD_HEADER_SECTION_FAT_FORMAT | METHOD_HEADER_SECTION_EHTABLE;
num_exception *= 6 * sizeof (guint32);
num_exception += 4; /* include the size of the header */
sheader [1] = num_exception & 0xff;
sheader [2] = (num_exception >> 8) & 0xff;
sheader [3] = (num_exception >> 16) & 0xff;
mono_image_add_stream_data (&assembly->code, (char*)sheader, 4);
/* fat header, so we are already aligned */
/* reverse order */
for (i = mono_array_length (mb->ilgen->ex_handlers) - 1; i >= 0; --i) {
ex_info = (MonoILExceptionInfo *)mono_array_addr (mb->ilgen->ex_handlers, MonoILExceptionInfo, i);
if (ex_info->handlers) {
int finally_start = ex_info->start + ex_info->len;
for (j = 0; j < mono_array_length (ex_info->handlers); ++j) {
guint32 val;
ex_block = (MonoILExceptionBlock*)mono_array_addr (ex_info->handlers, MonoILExceptionBlock, j);
/* the flags */
val = GUINT32_TO_LE (ex_block->type);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* try offset */
val = GUINT32_TO_LE (ex_info->start);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* need fault, too, probably */
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FINALLY)
val = GUINT32_TO_LE (finally_start - ex_info->start);
else
val = GUINT32_TO_LE (ex_info->len);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* handler offset */
val = GUINT32_TO_LE (ex_block->start);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/* handler len */
val = GUINT32_TO_LE (ex_block->len);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
finally_start = ex_block->start + ex_block->len;
if (ex_block->extype) {
val = mono_metadata_token_from_dor (mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)ex_block->extype)));
} else {
if (ex_block->type == MONO_EXCEPTION_CLAUSE_FILTER)
val = ex_block->filter_offset;
else
val = 0;
}
val = GUINT32_TO_LE (val);
mono_image_add_stream_data (&assembly->code, (char*)&val, sizeof (guint32));
/*g_print ("out clause %d: from %d len=%d, handler at %d, %d, finally_start=%d, ex_info->start=%d, ex_info->len=%d, ex_block->type=%d, j=%d, i=%d\n",
clause.flags, clause.try_offset, clause.try_len, clause.handler_offset, clause.handler_len, finally_start, ex_info->start, ex_info->len, ex_block->type, j, i);*/
}
} else {
g_error ("No clauses for ex info block %d", i);
}
}
}
return assembly->text_rva + idx;
}
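/*
 * Linearly scan TABLE_IDX for a row whose column COL equals TOKEN.
 * Returns the 1-based row index, or 0 if no row matches.
 */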
static guint32
find_index_in_table (MonoDynamicImage *assembly, int table_idx, int col, guint32 token)
{
int i;
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [table_idx];
g_assert (col < table->columns);
values = table->values + table->columns;
for (i = 1; i <= table->rows; ++i) {
if (values [col] == token)
return i;
values += table->columns;
}
return 0;
}
/*
* LOCKING: Acquires the loader lock.
*/
static MonoCustomAttrInfo*
lookup_custom_attr (MonoImage *image, gpointer member)
{
MonoCustomAttrInfo* res;
res = mono_image_property_lookup (image, member, MONO_PROP_DYNAMIC_CATTR);
if (!res)
return NULL;
res = g_memdup (res, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * res->num_attrs);
res->cached = 0;
return res;
}
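/*
 * Check whether the custom attribute CATTR may be emitted into IMAGE:
 * an attribute whose constructor lives in a different image must be
 * declared on a public (or nested public) type.
 */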
static gboolean
custom_attr_visible (MonoImage *image, MonoReflectionCustomAttr *cattr)
{
/* FIXME: Need to do more checks */
if (cattr->ctor->method && (cattr->ctor->method->klass->image != image)) {
int visibility = cattr->ctor->method->klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK;
if ((visibility != TYPE_ATTRIBUTE_PUBLIC) && (visibility != TYPE_ATTRIBUTE_NESTED_PUBLIC))
return FALSE;
}
return TRUE;
}
static MonoCustomAttrInfo*
mono_custom_attrs_from_builders (MonoImage *alloc_img, MonoImage *image, MonoArray *cattrs)
{
int i, index, count, not_visible;
MonoCustomAttrInfo *ainfo;
MonoReflectionCustomAttr *cattr;
if (!cattrs)
return NULL;
/* FIXME: check in assembly the Run flag is set */
count = mono_array_length (cattrs);
/* Skip nonpublic attributes since MS.NET seems to do the same */
/* FIXME: This needs to be done more globally */
not_visible = 0;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
if (!custom_attr_visible (image, cattr))
not_visible ++;
}
count -= not_visible;
ainfo = image_g_malloc0 (alloc_img, MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * count);
ainfo->image = image;
ainfo->num_attrs = count;
ainfo->cached = alloc_img != NULL;
index = 0;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
if (custom_attr_visible (image, cattr)) {
unsigned char *saved = mono_image_alloc (image, mono_array_length (cattr->data));
memcpy (saved, mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data));
ainfo->attrs [index].ctor = cattr->ctor->method;
ainfo->attrs [index].data = saved;
ainfo->attrs [index].data_size = mono_array_length (cattr->data);
index ++;
}
}
return ainfo;
}
#ifndef DISABLE_REFLECTION_EMIT
/*
* LOCKING: Acquires the loader lock.
*/
static void
mono_save_custom_attrs (MonoImage *image, void *obj, MonoArray *cattrs)
{
MonoCustomAttrInfo *ainfo, *tmp;
if (!cattrs || !mono_array_length (cattrs))
return;
ainfo = mono_custom_attrs_from_builders (image, image, cattrs);
mono_loader_lock ();
tmp = mono_image_property_lookup (image, obj, MONO_PROP_DYNAMIC_CATTR);
if (tmp)
mono_custom_attrs_free (tmp);
mono_image_property_insert (image, obj, MONO_PROP_DYNAMIC_CATTR, ainfo);
mono_loader_unlock ();
}
#endif
void
mono_custom_attrs_free (MonoCustomAttrInfo *ainfo)
{
if (!ainfo->cached)
g_free (ainfo);
}
/*
* idx is the table index of the object
* type is one of MONO_CUSTOM_ATTR_*
*/
static void
mono_image_add_cattrs (MonoDynamicImage *assembly, guint32 idx, guint32 type, MonoArray *cattrs)
{
MonoDynamicTable *table;
MonoReflectionCustomAttr *cattr;
guint32 *values;
guint32 count, i, token;
char blob_size [6];
char *p = blob_size;
/* it is legal to pass a NULL cattrs: this saves callers an if check in a lot of places */
if (!cattrs)
return;
count = mono_array_length (cattrs);
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
table->rows += count;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_CUSTOM_ATTR_SIZE;
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= type;
for (i = 0; i < count; ++i) {
cattr = (MonoReflectionCustomAttr*)mono_array_get (cattrs, gpointer, i);
values [MONO_CUSTOM_ATTR_PARENT] = idx;
token = mono_image_create_token (assembly, (MonoObject*)cattr->ctor, FALSE, FALSE);
type = mono_metadata_token_index (token);
type <<= MONO_CUSTOM_ATTR_TYPE_BITS;
switch (mono_metadata_token_table (token)) {
case MONO_TABLE_METHOD:
type |= MONO_CUSTOM_ATTR_TYPE_METHODDEF;
break;
case MONO_TABLE_MEMBERREF:
type |= MONO_CUSTOM_ATTR_TYPE_MEMBERREF;
break;
default:
g_warning ("got wrong token in custom attr");
continue;
}
values [MONO_CUSTOM_ATTR_TYPE] = type;
p = blob_size;
mono_metadata_encode_value (mono_array_length (cattr->data), p, &p);
values [MONO_CUSTOM_ATTR_VALUE] = add_to_blob_cached (assembly, blob_size, p - blob_size,
mono_array_addr (cattr->data, char, 0), mono_array_length (cattr->data));
values += MONO_CUSTOM_ATTR_SIZE;
++table->next_idx;
}
}
static void
mono_image_add_decl_security (MonoDynamicImage *assembly, guint32 parent_token, MonoArray *permissions)
{
MonoDynamicTable *table;
guint32 *values;
guint32 count, i, idx;
MonoReflectionPermissionSet *perm;
if (!permissions)
return;
count = mono_array_length (permissions);
table = &assembly->tables [MONO_TABLE_DECLSECURITY];
table->rows += count;
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (permissions); ++i) {
perm = (MonoReflectionPermissionSet*)mono_array_addr (permissions, MonoReflectionPermissionSet, i);
values = table->values + table->next_idx * MONO_DECL_SECURITY_SIZE;
idx = mono_metadata_token_index (parent_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
switch (mono_metadata_token_table (parent_token)) {
case MONO_TABLE_TYPEDEF:
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
break;
case MONO_TABLE_METHOD:
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
break;
case MONO_TABLE_ASSEMBLY:
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
break;
default:
g_assert_not_reached ();
}
values [MONO_DECL_SECURITY_ACTION] = perm->action;
values [MONO_DECL_SECURITY_PARENT] = idx;
values [MONO_DECL_SECURITY_PERMISSIONSET] = add_mono_string_to_blob_cached (assembly, perm->pset);
++table->next_idx;
}
}
/*
* Fill in the MethodDef and ParamDef tables for a method.
* This is used for both normal methods and constructors.
*/
static void
mono_image_basic_method (ReflectionMethodBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint i, count;
/* room in this table is already allocated */
table = &assembly->tables [MONO_TABLE_METHOD];
*mb->table_idx = table->next_idx ++;
g_hash_table_insert (assembly->method_to_table_idx, mb->mhandle, GUINT_TO_POINTER ((*mb->table_idx)));
values = table->values + *mb->table_idx * MONO_METHOD_SIZE;
values [MONO_METHOD_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name);
values [MONO_METHOD_FLAGS] = mb->attrs;
values [MONO_METHOD_IMPLFLAGS] = mb->iattrs;
values [MONO_METHOD_SIGNATURE] = method_builder_encode_signature (assembly, mb);
values [MONO_METHOD_RVA] = method_encode_code (assembly, mb);
table = &assembly->tables [MONO_TABLE_PARAM];
values [MONO_METHOD_PARAMLIST] = table->next_idx;
mono_image_add_decl_security (assembly,
mono_metadata_make_token (MONO_TABLE_METHOD, *mb->table_idx), mb->permissions);
if (mb->pinfo) {
MonoDynamicTable *mtable;
guint32 *mvalues;
mtable = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
mvalues = mtable->values + mtable->next_idx * MONO_FIELD_MARSHAL_SIZE;
count = 0;
for (i = 0; i < mono_array_length (mb->pinfo); ++i) {
if (mono_array_get (mb->pinfo, gpointer, i))
count++;
}
table->rows += count;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_PARAM_SIZE;
for (i = 0; i < mono_array_length (mb->pinfo); ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (mb->pinfo, MonoReflectionParamBuilder*, i))) {
values [MONO_PARAM_FLAGS] = pb->attrs;
values [MONO_PARAM_SEQUENCE] = i;
if (pb->name != NULL) {
values [MONO_PARAM_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name);
} else {
values [MONO_PARAM_NAME] = 0;
}
values += MONO_PARAM_SIZE;
if (pb->marshal_info) {
mtable->rows++;
alloc_table (mtable, mtable->rows);
mvalues = mtable->values + mtable->rows * MONO_FIELD_MARSHAL_SIZE;
mvalues [MONO_FIELD_MARSHAL_PARENT] = (table->next_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_PARAMDEF;
mvalues [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, pb->marshal_info);
}
pb->table_idx = table->next_idx++;
if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) {
guint32 field_type = 0;
mtable = &assembly->tables [MONO_TABLE_CONSTANT];
mtable->rows ++;
alloc_table (mtable, mtable->rows);
mvalues = mtable->values + mtable->rows * MONO_CONSTANT_SIZE;
mvalues [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PARAM | (pb->table_idx << MONO_HASCONSTANT_BITS);
mvalues [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type);
mvalues [MONO_CONSTANT_TYPE] = field_type;
mvalues [MONO_CONSTANT_PADDING] = 0;
}
}
}
}
}
#ifndef DISABLE_REFLECTION_EMIT
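/*
 * The reflection_methodbuilder_from_* helpers below normalize the
 * different managed builder objects (MethodBuilder, ConstructorBuilder,
 * DynamicMethod) into a single ReflectionMethodBuilder view, so the
 * emission code only has to deal with one shape.
 */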
static void
reflection_methodbuilder_from_method_builder (ReflectionMethodBuilder *rmb, MonoReflectionMethodBuilder *mb)
{
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mono_reflection_type_resolve_user_types ((MonoReflectionType*)mb->rtype);
rmb->parameters = mb->parameters;
rmb->generic_params = mb->generic_params;
rmb->generic_container = mb->generic_container;
rmb->opt_types = NULL;
rmb->pinfo = mb->pinfo;
rmb->attrs = mb->attrs;
rmb->iattrs = mb->iattrs;
rmb->call_conv = mb->call_conv;
rmb->code = mb->code;
rmb->type = mb->type;
rmb->name = mb->name;
rmb->table_idx = &mb->table_idx;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = FALSE;
rmb->return_modreq = mb->return_modreq;
rmb->return_modopt = mb->return_modopt;
rmb->param_modreq = mb->param_modreq;
rmb->param_modopt = mb->param_modopt;
rmb->permissions = mb->permissions;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
if (mb->dll) {
rmb->charset = mb->charset;
rmb->extra_flags = mb->extra_flags;
rmb->native_cc = mb->native_cc;
rmb->dllentry = mb->dllentry;
rmb->dll = mb->dll;
}
}
static void
reflection_methodbuilder_from_ctor_builder (ReflectionMethodBuilder *rmb, MonoReflectionCtorBuilder *mb)
{
const char *name = mb->attrs & METHOD_ATTRIBUTE_STATIC ? ".cctor": ".ctor";
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mono_type_get_object (mono_domain_get (), &mono_defaults.void_class->byval_arg);
rmb->parameters = mb->parameters;
rmb->generic_params = NULL;
rmb->generic_container = NULL;
rmb->opt_types = NULL;
rmb->pinfo = mb->pinfo;
rmb->attrs = mb->attrs;
rmb->iattrs = mb->iattrs;
rmb->call_conv = mb->call_conv;
rmb->code = NULL;
rmb->type = mb->type;
rmb->name = mono_string_new (mono_domain_get (), name);
rmb->table_idx = &mb->table_idx;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = FALSE;
rmb->return_modreq = NULL;
rmb->return_modopt = NULL;
rmb->param_modreq = mb->param_modreq;
rmb->param_modopt = mb->param_modopt;
rmb->permissions = mb->permissions;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
}
static void
reflection_methodbuilder_from_dynamic_method (ReflectionMethodBuilder *rmb, MonoReflectionDynamicMethod *mb)
{
memset (rmb, 0, sizeof (ReflectionMethodBuilder));
rmb->ilgen = mb->ilgen;
rmb->rtype = mb->rtype;
rmb->parameters = mb->parameters;
rmb->generic_params = NULL;
rmb->generic_container = NULL;
rmb->opt_types = NULL;
rmb->pinfo = NULL;
rmb->attrs = mb->attrs;
rmb->iattrs = 0;
rmb->call_conv = mb->call_conv;
rmb->code = NULL;
rmb->type = (MonoObject *) mb->owner;
rmb->name = mb->name;
rmb->table_idx = NULL;
rmb->init_locals = mb->init_locals;
rmb->skip_visibility = mb->skip_visibility;
rmb->return_modreq = NULL;
rmb->return_modopt = NULL;
rmb->param_modreq = NULL;
rmb->param_modopt = NULL;
rmb->permissions = NULL;
rmb->mhandle = mb->mhandle;
rmb->nrefs = 0;
rmb->refs = NULL;
}
#endif
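/*
 * Emit a MethodImpl row recording that MB overrides
 * mb->override_method, encoding both sides as MethodDefOrRef coded
 * tokens.
 */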
static void
mono_image_add_methodimpl (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type;
MonoDynamicTable *table;
guint32 *values;
guint32 tok;
if (!mb->override_method)
return;
table = &assembly->tables [MONO_TABLE_METHODIMPL];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_METHODIMPL_SIZE;
values [MONO_METHODIMPL_CLASS] = tb->table_idx;
values [MONO_METHODIMPL_BODY] = MONO_METHODDEFORREF_METHODDEF | (mb->table_idx << MONO_METHODDEFORREF_BITS);
tok = mono_image_create_token (assembly, (MonoObject*)mb->override_method, FALSE, FALSE);
switch (mono_metadata_token_table (tok)) {
case MONO_TABLE_MEMBERREF:
tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
tok = (mono_metadata_token_index (tok) << MONO_METHODDEFORREF_BITS ) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
values [MONO_METHODIMPL_DECLARATION] = tok;
}
#ifndef DISABLE_REFLECTION_EMIT
static void
mono_image_get_method_info (MonoReflectionMethodBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
ReflectionMethodBuilder rmb;
int i;
reflection_methodbuilder_from_method_builder (&rmb, mb);
mono_image_basic_method (&rmb, assembly);
mb->table_idx = *rmb.table_idx;
if (mb->dll) { /* It's a P/Invoke method */
guint32 moduleref;
/* map CharSet values to on-disk values */
int ncharset = (mb->charset ? (mb->charset - 1) * 2 : 0);
int extra_flags = mb->extra_flags;
table = &assembly->tables [MONO_TABLE_IMPLMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_IMPLMAP_SIZE;
values [MONO_IMPLMAP_FLAGS] = (mb->native_cc << 8) | ncharset | extra_flags;
values [MONO_IMPLMAP_MEMBER] = (mb->table_idx << 1) | 1; /* memberforwarded: method */
if (mb->dllentry)
values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->dllentry);
else
values [MONO_IMPLMAP_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->name);
moduleref = string_heap_insert_mstring (&assembly->sheap, mb->dll);
if (!(values [MONO_IMPLMAP_SCOPE] = find_index_in_table (assembly, MONO_TABLE_MODULEREF, MONO_MODULEREF_NAME, moduleref))) {
table = &assembly->tables [MONO_TABLE_MODULEREF];
table->rows ++;
alloc_table (table, table->rows);
table->values [table->rows * MONO_MODULEREF_SIZE + MONO_MODULEREF_NAME] = moduleref;
values [MONO_IMPLMAP_SCOPE] = table->rows;
}
}
if (mb->generic_params) {
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table->rows += mono_array_length (mb->generic_params);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (mb->generic_params); ++i) {
guint32 owner = MONO_TYPEORMETHOD_METHOD | (mb->table_idx << MONO_TYPEORMETHOD_BITS);
mono_image_get_generic_param_info (
mono_array_get (mb->generic_params, gpointer, i), owner, assembly);
}
}
}
static void
mono_image_get_ctor_info (MonoDomain *domain, MonoReflectionCtorBuilder *mb, MonoDynamicImage *assembly)
{
ReflectionMethodBuilder rmb;
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
mono_image_basic_method (&rmb, assembly);
mb->table_idx = *rmb.table_idx;
}
#endif
static char*
type_get_fully_qualified_name (MonoType *type)
{
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED);
}
static char*
type_get_qualified_name (MonoType *type, MonoAssembly *ass) {
MonoClass *klass;
MonoAssembly *ta;
klass = mono_class_from_mono_type (type);
if (!klass)
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION);
ta = klass->image->assembly;
if (ta->dynamic || (ta == ass)) {
if (klass->generic_class || klass->generic_container)
/* For generic type definitions, we want T, while REFLECTION returns T<K> */
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_FULL_NAME);
else
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_REFLECTION);
}
return mono_type_get_name_full (type, MONO_TYPE_NAME_FORMAT_ASSEMBLY_QUALIFIED);
}
#ifndef DISABLE_REFLECTION_EMIT
/* field_image is the image against which any custom modifiers have been encoded */
static guint32
fieldref_encode_signature (MonoDynamicImage *assembly, MonoImage *field_image, MonoType *type)
{
SigBuffer buf;
guint32 idx, i, token;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06);
/* encode custom modifiers before the type */
if (type->num_mods) {
for (i = 0; i < type->num_mods; ++i) {
if (field_image) {
MonoClass *class = mono_class_get (field_image, type->modifiers [i].token);
g_assert (class);
token = mono_image_typedef_or_ref (assembly, &class->byval_arg);
} else {
token = type->modifiers [i].token;
}
if (type->modifiers [i].required)
sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_REQD);
else
sigbuffer_add_byte (&buf, MONO_TYPE_CMOD_OPT);
sigbuffer_add_value (&buf, token);
}
}
encode_type (assembly, type, &buf);
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
#endif
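/*
 * Encode a FieldSig blob (leading byte 0x06) for FB, including custom
 * modifiers. Fields of types with a generic container are encoded via
 * a GENERICINST so the signature refers to the instantiated type.
 */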
static guint32
field_encode_signature (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb)
{
SigBuffer buf;
guint32 idx;
guint32 typespec = 0;
MonoType *type;
MonoClass *class;
init_type_builder_generics (fb->type);
type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
class = mono_class_from_mono_type (type);
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06);
encode_custom_modifiers (assembly, fb->modreq, fb->modopt, &buf);
/* custom modifiers were encoded above, before the type */
if (class->generic_container)
typespec = create_typespec (assembly, type);
if (typespec) {
MonoGenericClass *gclass;
gclass = mono_metadata_lookup_generic_class (class, class->generic_container->context.class_inst, TRUE);
encode_generic_class (assembly, gclass, &buf);
} else {
encode_type (assembly, type, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
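/*
 * Encode the boxed constant VAL into the #Blob heap and return its
 * offset; *RET_TYPE receives the element type to store in the Constant
 * table. Enums are unwrapped to their underlying primitive type, and a
 * NULL value is stored as MONO_TYPE_CLASS with four zero bytes.
 */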
static guint32
encode_constant (MonoDynamicImage *assembly, MonoObject *val, guint32 *ret_type) {
char blob_size [64];
char *b = blob_size;
char *p, *box_val;
char* buf;
guint32 idx = 0, len = 0, dummy = 0;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
guint32 fpa_double [2];
guint32 *fpa_p;
#endif
#endif
p = buf = g_malloc (64);
if (!val) {
*ret_type = MONO_TYPE_CLASS;
len = 4;
box_val = (char*)&dummy;
} else {
box_val = ((char*)val) + sizeof (MonoObject);
*ret_type = val->vtable->klass->byval_arg.type;
}
handle_enum:
switch (*ret_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
len = 1;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
len = 2;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
len = 4;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
len = 8;
break;
case MONO_TYPE_R8:
len = 8;
#ifdef ARM_FPU_FPA
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
fpa_p = (guint32*)box_val;
fpa_double [0] = fpa_p [1];
fpa_double [1] = fpa_p [0];
box_val = (char*)fpa_double;
#endif
#endif
break;
case MONO_TYPE_VALUETYPE: {
MonoClass *klass = val->vtable->klass;
if (klass->enumtype) {
*ret_type = mono_class_enum_basetype (klass)->type;
goto handle_enum;
} else if (mono_is_corlib_image (klass->image) && strcmp (klass->name_space, "System") == 0 && strcmp (klass->name, "DateTime") == 0) {
len = 8;
} else
g_error ("we can't encode valuetypes, we should have never reached this line");
break;
}
case MONO_TYPE_CLASS:
break;
case MONO_TYPE_STRING: {
MonoString *str = (MonoString*)val;
/* there is no signature */
len = str->length * 2;
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, swapped, len);
g_free (swapped);
}
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, (char*)mono_string_chars (str), len);
#endif
g_free (buf);
return idx;
}
case MONO_TYPE_GENERICINST:
*ret_type = val->vtable->klass->generic_class->container_class->byval_arg.type;
goto handle_enum;
default:
g_error ("we don't encode constant type 0x%02x yet", *ret_type);
}
/* there is no signature */
mono_metadata_encode_value (len, b, &b);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
idx = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
swap_with_size (blob_size, box_val, len, 1);
mono_image_add_stream_data (&assembly->blob, blob_size, len);
#else
idx = add_to_blob_cached (assembly, blob_size, b-blob_size, box_val, len);
#endif
g_free (buf);
return idx;
}
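/*
 * Build a FieldMarshal descriptor (ECMA-335 II.23.4) for MINFO: the
 * native type byte followed by type-specific data such as array sizes
 * or the custom marshaler type name.
 */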
static guint32
encode_marshal_blob (MonoDynamicImage *assembly, MonoReflectionMarshal *minfo) {
char *str;
SigBuffer buf;
guint32 idx, len;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, minfo->type);
switch (minfo->type) {
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
sigbuffer_add_value (&buf, minfo->count);
break;
case MONO_NATIVE_LPARRAY:
if (minfo->eltype || minfo->has_size) {
sigbuffer_add_value (&buf, minfo->eltype);
if (minfo->has_size) {
sigbuffer_add_value (&buf, minfo->param_num != -1? minfo->param_num: 0);
sigbuffer_add_value (&buf, minfo->count != -1? minfo->count: 0);
/* LAMESPEC: ElemMult is undocumented */
sigbuffer_add_value (&buf, minfo->param_num != -1? 1: 0);
}
}
break;
case MONO_NATIVE_SAFEARRAY:
if (minfo->eltype)
sigbuffer_add_value (&buf, minfo->eltype);
break;
case MONO_NATIVE_CUSTOM:
if (minfo->guid) {
str = mono_string_to_utf8 (minfo->guid);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
sigbuffer_add_value (&buf, 0);
}
/* native type name */
sigbuffer_add_value (&buf, 0);
/* custom marshaler type name */
if (minfo->marshaltype || minfo->marshaltyperef) {
if (minfo->marshaltyperef)
str = type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
else
str = mono_string_to_utf8 (minfo->marshaltype);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
/* FIXME: Actually a bug, since this field is required. Punting for now ... */
sigbuffer_add_value (&buf, 0);
}
if (minfo->mcookie) {
str = mono_string_to_utf8 (minfo->mcookie);
len = strlen (str);
sigbuffer_add_value (&buf, len);
sigbuffer_add_mem (&buf, str, len);
g_free (str);
} else {
sigbuffer_add_value (&buf, 0);
}
break;
default:
break;
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static void
mono_image_get_field_info (MonoReflectionFieldBuilder *fb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
/* maybe this fixup should be done in the C# code */
if (fb->attrs & FIELD_ATTRIBUTE_LITERAL)
fb->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
table = &assembly->tables [MONO_TABLE_FIELD];
fb->table_idx = table->next_idx ++;
g_hash_table_insert (assembly->field_to_table_idx, fb->handle, GUINT_TO_POINTER (fb->table_idx));
values = table->values + fb->table_idx * MONO_FIELD_SIZE;
values [MONO_FIELD_NAME] = string_heap_insert_mstring (&assembly->sheap, fb->name);
values [MONO_FIELD_FLAGS] = fb->attrs;
values [MONO_FIELD_SIGNATURE] = field_encode_signature (assembly, fb);
if (fb->offset != -1) {
table = &assembly->tables [MONO_TABLE_FIELDLAYOUT];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_LAYOUT_SIZE;
values [MONO_FIELD_LAYOUT_FIELD] = fb->table_idx;
values [MONO_FIELD_LAYOUT_OFFSET] = fb->offset;
}
if (fb->attrs & FIELD_ATTRIBUTE_LITERAL) {
guint32 field_type = 0;
table = &assembly->tables [MONO_TABLE_CONSTANT];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_CONSTANT_SIZE;
values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_FIEDDEF | (fb->table_idx << MONO_HASCONSTANT_BITS);
values [MONO_CONSTANT_VALUE] = encode_constant (assembly, fb->def_value, &field_type);
values [MONO_CONSTANT_TYPE] = field_type;
values [MONO_CONSTANT_PADDING] = 0;
}
if (fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) {
guint32 rva_idx;
table = &assembly->tables [MONO_TABLE_FIELDRVA];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_RVA_SIZE;
values [MONO_FIELD_RVA_FIELD] = fb->table_idx;
/*
* We store it in the code section because it's simpler for now.
*/
if (fb->rva_data) {
if (mono_array_length (fb->rva_data) >= 10)
stream_data_align (&assembly->code);
rva_idx = mono_image_add_stream_data (&assembly->code, mono_array_addr (fb->rva_data, char, 0), mono_array_length (fb->rva_data));
} else
rva_idx = mono_image_add_stream_zero (&assembly->code, mono_class_value_size (fb->handle->parent, NULL));
values [MONO_FIELD_RVA_RVA] = rva_idx + assembly->text_rva;
}
if (fb->marshal_info) {
table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_FIELD_MARSHAL_SIZE;
values [MONO_FIELD_MARSHAL_PARENT] = (fb->table_idx << MONO_HAS_FIELD_MARSHAL_BITS) | MONO_HAS_FIELD_MARSHAL_FIELDSREF;
values [MONO_FIELD_MARSHAL_NATIVE_TYPE] = encode_marshal_blob (assembly, fb->marshal_info);
}
}
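/*
 * property_encode_signature:
 *
 * Encode the PropertySig blob for the property builder (the PropertySig
 * format from ECMA-335): PROPERTY (0x08), optionally ORed with HASTHIS
 * (0x20), then the parameter count, the property type and the parameter
 * types. The types are taken from the getter if present, else from the
 * setter (whose last parameter is the property type), else from the
 * builder itself.
 */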
static guint32
property_encode_signature (MonoDynamicImage *assembly, MonoReflectionPropertyBuilder *fb)
{
SigBuffer buf;
guint32 nparams = 0;
MonoReflectionMethodBuilder *mb = fb->get_method;
MonoReflectionMethodBuilder *smb = fb->set_method;
guint32 idx, i;
if (mb && mb->parameters)
nparams = mono_array_length (mb->parameters);
if (!mb && smb && smb->parameters)
nparams = mono_array_length (smb->parameters) - 1; /* the setter's last param is the property value */
sigbuffer_init (&buf, 32);
if (fb->call_conv & 0x20) /* HASTHIS */
sigbuffer_add_byte (&buf, 0x28); /* PROPERTY | HASTHIS */
else
sigbuffer_add_byte (&buf, 0x08); /* PROPERTY */
sigbuffer_add_value (&buf, nparams);
if (mb) {
encode_reflection_type (assembly, (MonoReflectionType*)mb->rtype, &buf);
for (i = 0; i < nparams; ++i) {
MonoReflectionType *pt = mono_array_get (mb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
} else if (smb && smb->parameters) {
/* the property type is the last param */
encode_reflection_type (assembly, mono_array_get (smb->parameters, MonoReflectionType*, nparams), &buf);
for (i = 0; i < nparams; ++i) {
MonoReflectionType *pt = mono_array_get (smb->parameters, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
} else {
encode_reflection_type (assembly, (MonoReflectionType*)fb->type, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static void
mono_image_get_property_info (MonoReflectionPropertyBuilder *pb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint num_methods = 0;
guint32 semaidx;
/*
* we need to set things in the following tables:
* PROPERTYMAP (info already filled in _get_type_info ())
* PROPERTY (rows already preallocated in _get_type_info ())
* METHOD (method info already done with the generic method code)
* METHODSEMANTICS
* CONSTANT
*/
table = &assembly->tables [MONO_TABLE_PROPERTY];
pb->table_idx = table->next_idx ++;
values = table->values + pb->table_idx * MONO_PROPERTY_SIZE;
values [MONO_PROPERTY_NAME] = string_heap_insert_mstring (&assembly->sheap, pb->name);
values [MONO_PROPERTY_FLAGS] = pb->attrs;
values [MONO_PROPERTY_TYPE] = property_encode_signature (assembly, pb);
/* FIXME: we still don't handle 'other' methods */
if (pb->get_method) num_methods ++;
if (pb->set_method) num_methods ++;
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
table->rows += num_methods;
alloc_table (table, table->rows);
if (pb->get_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_GETTER;
values [MONO_METHOD_SEMA_METHOD] = pb->get_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
}
if (pb->set_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_SETTER;
values [MONO_METHOD_SEMA_METHOD] = pb->set_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (pb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_PROPERTY;
}
if (pb->attrs & PROPERTY_ATTRIBUTE_HAS_DEFAULT) {
guint32 field_type = 0;
table = &assembly->tables [MONO_TABLE_CONSTANT];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_CONSTANT_SIZE;
values [MONO_CONSTANT_PARENT] = MONO_HASCONSTANT_PROPERTY | (pb->table_idx << MONO_HASCONSTANT_BITS);
values [MONO_CONSTANT_VALUE] = encode_constant (assembly, pb->def_value, &field_type);
values [MONO_CONSTANT_TYPE] = field_type;
values [MONO_CONSTANT_PADDING] = 0;
}
}
static void
mono_image_get_event_info (MonoReflectionEventBuilder *eb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint num_methods = 0;
guint32 semaidx;
/*
* we need to set things in the following tables:
* EVENTMAP (info already filled in _get_type_info ())
* EVENT (rows already preallocated in _get_type_info ())
* METHOD (method info already done with the generic method code)
* METHODSEMANTICS
*/
table = &assembly->tables [MONO_TABLE_EVENT];
eb->table_idx = table->next_idx ++;
values = table->values + eb->table_idx * MONO_EVENT_SIZE;
values [MONO_EVENT_NAME] = string_heap_insert_mstring (&assembly->sheap, eb->name);
values [MONO_EVENT_FLAGS] = eb->attrs;
values [MONO_EVENT_TYPE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (eb->type));
/*
* FIXME: we still don't handle 'other' methods
*/
if (eb->add_method) num_methods ++;
if (eb->remove_method) num_methods ++;
if (eb->raise_method) num_methods ++;
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
table->rows += num_methods;
alloc_table (table, table->rows);
if (eb->add_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_ADD_ON;
values [MONO_METHOD_SEMA_METHOD] = eb->add_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
if (eb->remove_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_REMOVE_ON;
values [MONO_METHOD_SEMA_METHOD] = eb->remove_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
if (eb->raise_method) {
semaidx = table->next_idx ++;
values = table->values + semaidx * MONO_METHOD_SEMA_SIZE;
values [MONO_METHOD_SEMA_SEMANTICS] = METHOD_SEMANTIC_FIRE;
values [MONO_METHOD_SEMA_METHOD] = eb->raise_method->table_idx;
values [MONO_METHOD_SEMA_ASSOCIATION] = (eb->table_idx << MONO_HAS_SEMANTICS_BITS) | MONO_HAS_SEMANTICS_EVENT;
}
}
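/*
 * encode_constraints:
 *
 * Emit one GENERICPARAMCONSTRAINT row for the base type constraint (if any)
 * and one per interface constraint of GPARAM, each pointing back to the
 * GenericParam row identified by OWNER.
 */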
static void
encode_constraints (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 num_constraints, i;
guint32 *values;
guint32 table_idx;
table = &assembly->tables [MONO_TABLE_GENERICPARAMCONSTRAINT];
num_constraints = gparam->iface_constraints ?
mono_array_length (gparam->iface_constraints) : 0;
table->rows += num_constraints;
if (gparam->base_type)
table->rows++;
alloc_table (table, table->rows);
if (gparam->base_type) {
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;
values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
assembly, mono_reflection_type_get_handle (gparam->base_type));
}
for (i = 0; i < num_constraints; i++) {
MonoReflectionType *constraint = mono_array_get (
gparam->iface_constraints, gpointer, i);
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENPARCONSTRAINT_SIZE;
values [MONO_GENPARCONSTRAINT_GENERICPAR] = owner;
values [MONO_GENPARCONSTRAINT_CONSTRAINT] = mono_image_typedef_or_ref (
assembly, mono_reflection_type_get_handle (constraint));
}
}
static void
mono_image_get_generic_param_info (MonoReflectionGenericParam *gparam, guint32 owner, MonoDynamicImage *assembly)
{
GenericParamTableEntry *entry;
/*
* The GenericParam table must be sorted according to the `owner' field.
* We need to do this sorting prior to writing the GenericParamConstraint
* table, since we have to use the final GenericParam table indices there
* and they must also be sorted.
*/
entry = g_new0 (GenericParamTableEntry, 1);
entry->owner = owner;
/* FIXME: track where gen_params should be freed and remove the GC root as well */
MONO_GC_REGISTER_ROOT_IF_MOVING (entry->gparam);
entry->gparam = gparam;
g_ptr_array_add (assembly->gen_params, entry);
}
static void
write_generic_param_entry (MonoDynamicImage *assembly, GenericParamTableEntry *entry)
{
MonoDynamicTable *table;
MonoGenericParam *param;
guint32 *values;
guint32 table_idx;
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table_idx = table->next_idx ++;
values = table->values + table_idx * MONO_GENERICPARAM_SIZE;
param = mono_reflection_type_get_handle ((MonoReflectionType*)entry->gparam)->data.generic_param;
values [MONO_GENERICPARAM_OWNER] = entry->owner;
values [MONO_GENERICPARAM_FLAGS] = entry->gparam->attrs;
values [MONO_GENERICPARAM_NUMBER] = mono_generic_param_num (param);
values [MONO_GENERICPARAM_NAME] = string_heap_insert (&assembly->sheap, mono_generic_param_info (param)->name);
mono_image_add_cattrs (assembly, table_idx, MONO_CUSTOM_ATTR_GENERICPAR, entry->gparam->cattrs);
encode_constraints (entry->gparam, table_idx, assembly);
}
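/*
 * resolution_scope_from_image:
 *
 * Return a coded ResolutionScope token referencing IMAGE from ASSEMBLY:
 * a MODULEREF row if IMAGE is another module of the same dynamic assembly,
 * an ASSEMBLYREF row otherwise. Tokens are cached in assembly->handleref.
 */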
static guint32
resolution_scope_from_image (MonoDynamicImage *assembly, MonoImage *image)
{
MonoDynamicTable *table;
guint32 token;
guint32 *values;
guint32 cols [MONO_ASSEMBLY_SIZE];
const char *pubkey;
guint32 publen;
if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, image))))
return token;
if (image->assembly->dynamic && (image->assembly == assembly->image.assembly)) {
table = &assembly->tables [MONO_TABLE_MODULEREF];
token = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + token * MONO_MODULEREF_SIZE;
values [MONO_MODULEREF_NAME] = string_heap_insert (&assembly->sheap, image->module_name);
token <<= MONO_RESOLTION_SCOPE_BITS;
token |= MONO_RESOLTION_SCOPE_MODULEREF;
g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));
return token;
}
if (image->assembly->dynamic)
/* FIXME: */
memset (cols, 0, sizeof (cols));
else {
/* image->assembly->image is the manifest module */
image = image->assembly->image;
mono_metadata_decode_row (&image->tables [MONO_TABLE_ASSEMBLY], 0, cols, MONO_ASSEMBLY_SIZE);
}
table = &assembly->tables [MONO_TABLE_ASSEMBLYREF];
token = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + token * MONO_ASSEMBLYREF_SIZE;
values [MONO_ASSEMBLYREF_NAME] = string_heap_insert (&assembly->sheap, image->assembly_name);
values [MONO_ASSEMBLYREF_MAJOR_VERSION] = cols [MONO_ASSEMBLY_MAJOR_VERSION];
values [MONO_ASSEMBLYREF_MINOR_VERSION] = cols [MONO_ASSEMBLY_MINOR_VERSION];
values [MONO_ASSEMBLYREF_BUILD_NUMBER] = cols [MONO_ASSEMBLY_BUILD_NUMBER];
values [MONO_ASSEMBLYREF_REV_NUMBER] = cols [MONO_ASSEMBLY_REV_NUMBER];
values [MONO_ASSEMBLYREF_FLAGS] = 0;
values [MONO_ASSEMBLYREF_CULTURE] = 0;
values [MONO_ASSEMBLYREF_HASH_VALUE] = 0;
if (strcmp ("", image->assembly->aname.culture)) {
values [MONO_ASSEMBLYREF_CULTURE] = string_heap_insert (&assembly->sheap,
image->assembly->aname.culture);
}
if ((pubkey = mono_image_get_public_key (image, &publen))) {
guchar pubtoken [9];
pubtoken [0] = 8; /* length prefix: the public key token is always 8 bytes */
mono_digest_get_public_token (pubtoken + 1, (guchar*)pubkey, publen);
values [MONO_ASSEMBLYREF_PUBLIC_KEY] = mono_image_add_stream_data (&assembly->blob, (char*)pubtoken, 9);
} else {
values [MONO_ASSEMBLYREF_PUBLIC_KEY] = 0;
}
token <<= MONO_RESOLTION_SCOPE_BITS;
token |= MONO_RESOLTION_SCOPE_ASSEMBLYREF;
g_hash_table_insert (assembly->handleref, image, GUINT_TO_POINTER (token));
return token;
}
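/*
 * create_typespec:
 *
 * Add a TYPESPEC row for TYPE if it must be encoded as a signature (pointers,
 * function pointers, arrays, generic parameters and instantiations). Returns
 * the coded TypeDefOrRef token, or 0 if TYPE can be referenced directly.
 */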
static guint32
create_typespec (MonoDynamicImage *assembly, MonoType *type)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token;
SigBuffer buf;
if ((token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type))))
return token;
sigbuffer_init (&buf, 32);
switch (type->type) {
case MONO_TYPE_FNPTR:
case MONO_TYPE_PTR:
case MONO_TYPE_SZARRAY:
case MONO_TYPE_ARRAY:
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
case MONO_TYPE_GENERICINST:
encode_type (assembly, type, &buf);
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_VALUETYPE: {
MonoClass *k = mono_class_from_mono_type (type);
if (!k || !k->generic_container) {
sigbuffer_free (&buf);
return 0;
}
encode_type (assembly, type, &buf);
break;
}
default:
sigbuffer_free (&buf);
return 0;
}
table = &assembly->tables [MONO_TABLE_TYPESPEC];
if (assembly->save) {
token = sigbuffer_add_to_blob_cached (assembly, &buf);
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPESPEC_SIZE;
values [MONO_TYPESPEC_SIGNATURE] = token;
}
sigbuffer_free (&buf);
token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS);
g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token));
table->next_idx ++;
return token;
}
static guint32
mono_image_typedef_or_ref_full (MonoDynamicImage *assembly, MonoType *type, gboolean try_typespec)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, scope, enclosing;
MonoClass *klass;
/* if the type requires a typespec, we must try that first */
if (try_typespec && (token = create_typespec (assembly, type)))
return token;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typeref, type));
if (token)
return token;
klass = mono_class_from_mono_type (type);
/*
* If it's in the same module and not a generic type parameter:
*/
if ((klass->image == &assembly->image) && (type->type != MONO_TYPE_VAR) &&
(type->type != MONO_TYPE_MVAR)) {
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
token = MONO_TYPEDEFORREF_TYPEDEF | (tb->table_idx << MONO_TYPEDEFORREF_BITS);
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
return token;
}
if (klass->nested_in) {
enclosing = mono_image_typedef_or_ref_full (assembly, &klass->nested_in->byval_arg, FALSE);
/* get the typeref idx of the enclosing type */
enclosing >>= MONO_TYPEDEFORREF_BITS;
scope = (enclosing << MONO_RESOLTION_SCOPE_BITS) | MONO_RESOLTION_SCOPE_TYPEREF;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
}
table = &assembly->tables [MONO_TABLE_TYPEREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPEREF_SIZE;
values [MONO_TYPEREF_SCOPE] = scope;
values [MONO_TYPEREF_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_TYPEREF_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
}
token = MONO_TYPEDEFORREF_TYPEREF | (table->next_idx << MONO_TYPEDEFORREF_BITS); /* typeref */
g_hash_table_insert (assembly->typeref, type, GUINT_TO_POINTER(token));
table->next_idx ++;
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), mono_class_get_ref_info (klass));
return token;
}
/*
 * Despite the name, this also handles TypeSpec tokens (with the above helper).
 */
static guint32
mono_image_typedef_or_ref (MonoDynamicImage *assembly, MonoType *type)
{
return mono_image_typedef_or_ref_full (assembly, type, TRUE);
}
#ifndef DISABLE_REFLECTION_EMIT
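/*
 * mono_image_add_memberef_row:
 *
 * Append a MEMBERREF row with the given coded PARENT, NAME and signature
 * blob index SIG, and return the resulting MONO_TOKEN_MEMBER_REF token.
 */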
static guint32
mono_image_add_memberef_row (MonoDynamicImage *assembly, guint32 parent, const char *name, guint32 sig)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, pclass;
switch (parent & MONO_TYPEDEFORREF_MASK) {
case MONO_TYPEDEFORREF_TYPEREF:
pclass = MONO_MEMBERREF_PARENT_TYPEREF;
break;
case MONO_TYPEDEFORREF_TYPESPEC:
pclass = MONO_MEMBERREF_PARENT_TYPESPEC;
break;
case MONO_TYPEDEFORREF_TYPEDEF:
pclass = MONO_MEMBERREF_PARENT_TYPEDEF;
break;
default:
g_warning ("unknown typeref or def token 0x%08x for %s", parent, name);
return 0;
}
/* extract the index */
parent >>= MONO_TYPEDEFORREF_BITS;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS);
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
return token;
}
/*
 * Insert a memberref row into the metadata: the token that points to the
 * memberref is returned. Caching is done in the caller (mono_image_get_methodref_token() or
 * mono_image_get_fieldref_token()).
 * The sig param is an index to an already built signature.
 */
static guint32
mono_image_get_memberref_token (MonoDynamicImage *assembly, MonoType *type, const char *name, guint32 sig)
{
guint32 parent = mono_image_typedef_or_ref (assembly, type);
return mono_image_add_memberef_row (assembly, parent, name, sig);
}
static guint32
mono_image_get_methodref_token (MonoDynamicImage *assembly, MonoMethod *method, gboolean create_typespec)
{
guint32 token;
MonoMethodSignature *sig;
create_typespec = create_typespec && method->is_generic && method->klass->image != &assembly->image;
if (create_typespec) {
/* methodspec tokens are cached under the method pointer + 1 to keep them distinct from memberref tokens */
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1)));
if (token)
return token;
}
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token && !create_typespec)
return token;
g_assert (!method->is_inflated);
if (!token) {
/*
* A methodref signature can't contain an unmanaged calling convention.
*/
sig = mono_metadata_signature_dup (mono_method_signature (method));
if ((sig->call_convention != MONO_CALL_DEFAULT) && (sig->call_convention != MONO_CALL_VARARG))
sig->call_convention = MONO_CALL_DEFAULT;
token = mono_image_get_memberref_token (assembly, &method->klass->byval_arg,
method->name, method_encode_signature (assembly, sig));
g_free (sig);
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
}
if (create_typespec) {
MonoDynamicTable *table = &assembly->tables [MONO_TABLE_METHODSPEC];
g_assert (mono_metadata_token_table (token) == MONO_TABLE_MEMBERREF);
token = (mono_metadata_token_index (token) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
if (assembly->save) {
guint32 *values;
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = token;
values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_sig (assembly, &mono_method_get_generic_container (method)->context);
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
/* methodspec and memberref tokens are different, so cache under method + 1 */
g_hash_table_insert (assembly->handleref, GUINT_TO_POINTER (GPOINTER_TO_UINT (method) + 1), GUINT_TO_POINTER (token));
return token;
}
return token;
}
static guint32
mono_image_get_methodref_token_for_methodbuilder (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *method)
{
guint32 token, parent, sig;
ReflectionMethodBuilder rmb;
char *name;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)method->type;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token)
return token;
name = mono_string_to_utf8 (method->name);
reflection_methodbuilder_from_method_builder (&rmb, method);
/*
* A methodref signature can't contain an unmanaged calling convention.
* Since some flags are encoded as part of call_conv, we need to check against it.
*/
if ((rmb.call_conv & ~0x60) != MONO_CALL_DEFAULT && (rmb.call_conv & ~0x60) != MONO_CALL_VARARG)
rmb.call_conv = (rmb.call_conv & 0x60) | MONO_CALL_DEFAULT;
sig = method_builder_encode_signature (assembly, &rmb);
if (tb->generic_params)
parent = create_generic_typespec (assembly, tb);
else
parent = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)rmb.type));
token = mono_image_add_memberef_row (assembly, parent, name, sig);
g_free (name);
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_varargs_method_token (MonoDynamicImage *assembly, guint32 original,
const gchar *name, guint32 sig)
{
MonoDynamicTable *table;
guint32 token;
guint32 *values;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = original;
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
return token;
}
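/*
 * encode_generic_method_definition_sig:
 *
 * Encode the MethodSpec signature blob for the generic method definition MB:
 * GENERICINST (0x0a), the generic parameter count, then one MVAR reference
 * per parameter, so the spec instantiates the definition with its own
 * generic parameters.
 */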
static guint32
encode_generic_method_definition_sig (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
SigBuffer buf;
int i;
guint32 nparams = mono_array_length (mb->generic_params);
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0xa); /* 0x0a == GENERICINST in a MethodSpec blob */
sigbuffer_add_value (&buf, nparams);
for (i = 0; i < nparams; i++) {
sigbuffer_add_value (&buf, MONO_TYPE_MVAR);
sigbuffer_add_value (&buf, i);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static guint32
mono_image_get_methodspec_token_for_generic_method_definition (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, mtoken = 0;
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->methodspec, mb));
if (token)
return token;
table = &assembly->tables [MONO_TABLE_METHODSPEC];
mtoken = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
switch (mono_metadata_token_table (mtoken)) {
case MONO_TABLE_MEMBERREF:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = mtoken;
values [MONO_METHODSPEC_SIGNATURE] = encode_generic_method_definition_sig (assembly, mb);
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
mono_g_hash_table_insert (assembly->methodspec, mb, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_methodbuilder_token (MonoDynamicImage *assembly, MonoReflectionMethodBuilder *mb, gboolean create_methodspec)
{
guint32 token;
if (mb->generic_params && create_methodspec)
return mono_image_get_methodspec_token_for_generic_method_definition (assembly, mb);
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, mb));
if (token)
return token;
token = mono_image_get_methodref_token_for_methodbuilder (assembly, mb);
mono_g_hash_table_insert (assembly->handleref_managed, mb, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_ctorbuilder_token (MonoDynamicImage *assembly, MonoReflectionCtorBuilder *mb)
{
guint32 token, parent, sig;
ReflectionMethodBuilder rmb;
char *name;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)mb->type;
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, mb));
if (token)
return token;
g_assert (tb->generic_params);
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
parent = create_generic_typespec (assembly, tb);
name = mono_string_to_utf8 (rmb.name);
sig = method_builder_encode_signature (assembly, &rmb);
token = mono_image_add_memberef_row (assembly, parent, name, sig);
g_free (name);
mono_g_hash_table_insert (assembly->handleref_managed, mb, GUINT_TO_POINTER(token));
return token;
}
#endif
static gboolean
is_field_on_inst (MonoClassField *field)
{
return (field->parent->generic_class && field->parent->generic_class->is_dynamic && ((MonoDynamicGenericClass*)field->parent->generic_class)->fields);
}
/*
* If FIELD is a field of a MonoDynamicGenericClass, return its non-inflated type.
*/
static MonoType*
get_field_on_inst_generic_type (MonoClassField *field)
{
MonoClass *class, *gtd;
MonoDynamicGenericClass *dgclass;
int field_index;
g_assert (is_field_on_inst (field));
dgclass = (MonoDynamicGenericClass*)field->parent->generic_class;
if (field >= dgclass->fields && field - dgclass->fields < dgclass->count_fields) {
field_index = field - dgclass->fields;
return dgclass->field_generic_types [field_index];
}
class = field->parent;
gtd = class->generic_class->container_class;
if (field >= class->fields && field - class->fields < class->field.count) {
field_index = field - class->fields;
return gtd->fields [field_index].type;
}
g_assert_not_reached ();
return NULL;
}
#ifndef DISABLE_REFLECTION_EMIT
static guint32
mono_image_get_fieldref_token (MonoDynamicImage *assembly, MonoObject *f, MonoClassField *field)
{
MonoType *type;
guint32 token;
g_assert (field);
g_assert (field->parent);
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, f));
if (token)
return token;
if (field->parent->generic_class && field->parent->generic_class->container_class && field->parent->generic_class->container_class->fields) {
int index = field - field->parent->fields;
type = field->parent->generic_class->container_class->fields [index].type;
} else {
if (is_field_on_inst (field))
type = get_field_on_inst_generic_type (field);
else
type = field->type;
}
token = mono_image_get_memberref_token (assembly, &field->parent->byval_arg,
mono_field_get_name (field),
fieldref_encode_signature (assembly, field->parent->image, type));
mono_g_hash_table_insert (assembly->handleref_managed, f, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_field_on_inst_token (MonoDynamicImage *assembly, MonoReflectionFieldOnTypeBuilderInst *f)
{
guint32 token;
MonoClass *klass;
MonoGenericClass *gclass;
MonoDynamicGenericClass *dgclass;
MonoType *type;
char *name;
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, f));
if (token)
return token;
if (is_sre_field_builder (mono_object_class (f->fb))) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)f->fb;
type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *) gclass;
name = mono_string_to_utf8 (fb->name);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name,
field_encode_signature (assembly, fb));
g_free (name);
} else if (is_sr_mono_field (mono_object_class (f->fb))) {
guint32 sig;
MonoClassField *field = ((MonoReflectionField *)f->fb)->field;
type = mono_reflection_type_get_handle ((MonoReflectionType*)f->inst);
klass = mono_class_from_mono_type (type);
sig = fieldref_encode_signature (assembly, field->parent->image, field->type);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, field->name, sig);
} else {
char *name = mono_type_get_full_name (mono_object_class (f->fb));
g_error ("mono_image_get_field_on_inst_token: don't know how to handle %s", name);
}
mono_g_hash_table_insert (assembly->handleref_managed, f, GUINT_TO_POINTER (token));
return token;
}
static guint32
mono_image_get_ctor_on_inst_token (MonoDynamicImage *assembly, MonoReflectionCtorOnTypeBuilderInst *c, gboolean create_methodspec)
{
guint32 sig, token;
MonoClass *klass;
MonoGenericClass *gclass;
MonoType *type;
/* A ctor cannot be a generic method, so we can ignore create_methodspec */
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, c));
if (token)
return token;
if (is_sre_ctor_builder (mono_object_class (c->cb))) {
MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder *)c->cb;
MonoDynamicGenericClass *dgclass;
ReflectionMethodBuilder rmb;
char *name;
type = mono_reflection_type_get_handle ((MonoReflectionType*)c->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *) gclass;
reflection_methodbuilder_from_ctor_builder (&rmb, cb);
name = mono_string_to_utf8 (rmb.name);
sig = method_builder_encode_signature (assembly, &rmb);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
g_free (name);
} else if (is_sr_mono_cmethod (mono_object_class (c->cb))) {
MonoMethod *mm = ((MonoReflectionMethod *)c->cb)->method;
type = mono_reflection_type_get_handle ((MonoReflectionType*)c->inst);
klass = mono_class_from_mono_type (type);
sig = method_encode_signature (assembly, mono_method_signature (mm));
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, mm->name, sig);
} else {
char *name = mono_type_get_full_name (mono_object_class (c->cb));
g_error ("mono_image_get_method_on_inst_token: don't know how to handle %s", name);
}
mono_g_hash_table_insert (assembly->handleref_managed, c, GUINT_TO_POINTER (token));
return token;
}
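/*
 * mono_reflection_method_on_tb_inst_get_handle:
 *
 * Return the runtime MonoMethod for M: the underlying method inflated with
 * the class instantiation of M->inst and, if method type arguments were
 * supplied, with those as the method instantiation as well.
 */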
static MonoMethod*
mono_reflection_method_on_tb_inst_get_handle (MonoReflectionMethodOnTypeBuilderInst *m)
{
MonoClass *klass;
MonoGenericContext tmp_context;
MonoType **type_argv;
MonoGenericInst *ginst;
MonoMethod *method, *inflated;
int count, i;
init_type_builder_generics ((MonoObject*)m->inst);
method = inflate_method (m->inst, (MonoObject*)m->mb);
klass = method->klass;
if (m->method_args == NULL)
return method;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_array_length (m->method_args);
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (m->method_args, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
return inflated;
}
static guint32
mono_image_get_method_on_inst_token (MonoDynamicImage *assembly, MonoReflectionMethodOnTypeBuilderInst *m, gboolean create_methodspec)
{
guint32 sig, token = 0;
MonoType *type;
MonoClass *klass;
if (m->method_args) {
MonoMethod *inflated;
inflated = mono_reflection_method_on_tb_inst_get_handle (m);
if (create_methodspec)
token = mono_image_get_methodspec_token (assembly, inflated);
else
token = mono_image_get_inflated_method_token (assembly, inflated);
return token;
}
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, m));
if (token)
return token;
if (is_sre_method_builder (mono_object_class (m->mb))) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)m->mb;
MonoGenericClass *gclass;
ReflectionMethodBuilder rmb;
char *name;
type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
klass = mono_class_from_mono_type (type);
gclass = type->data.generic_class;
g_assert (gclass->is_dynamic);
reflection_methodbuilder_from_method_builder (&rmb, mb);
name = mono_string_to_utf8 (rmb.name);
sig = method_builder_encode_signature (assembly, &rmb);
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, name, sig);
g_free (name);
} else if (is_sr_mono_method (mono_object_class (m->mb))) {
MonoMethod *mm = ((MonoReflectionMethod *)m->mb)->method;
type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
klass = mono_class_from_mono_type (type);
sig = method_encode_signature (assembly, mono_method_signature (mm));
token = mono_image_get_memberref_token (assembly, &klass->byval_arg, mm->name, sig);
} else {
char *name = mono_type_get_full_name (mono_object_class (m->mb));
g_error ("mono_image_get_method_on_inst_token: don't know how to handle %s", name);
}
mono_g_hash_table_insert (assembly->handleref_managed, m, GUINT_TO_POINTER (token));
return token;
}
static guint32
encode_generic_method_sig (MonoDynamicImage *assembly, MonoGenericContext *context)
{
SigBuffer buf;
int i;
guint32 nparams = context->method_inst->type_argc;
guint32 idx;
if (!assembly->save)
return 0;
sigbuffer_init (&buf, 32);
/*
 * FIXME: vararg, explicit_this, different call_conv values...
 */
sigbuffer_add_value (&buf, 0xa); /* GENERICINST; FIXME FIXME FIXME */
sigbuffer_add_value (&buf, nparams);
for (i = 0; i < nparams; i++)
encode_type (assembly, context->method_inst->type_argv [i], &buf);
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
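/*
 * method_encode_methodspec:
 *
 * Emit a METHODSPEC row for the inflated method METHOD: a coded
 * MethodDefOrRef token for the generic definition plus the signature of the
 * instantiation. If the declaring signature has no generic parameters the
 * plain memberref token is returned instead.
 */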
static guint32
method_encode_methodspec (MonoDynamicImage *assembly, MonoMethod *method)
{
MonoDynamicTable *table;
guint32 *values;
guint32 token, mtoken = 0, sig;
MonoMethodInflated *imethod;
MonoMethod *declaring;
table = &assembly->tables [MONO_TABLE_METHODSPEC];
g_assert (method->is_inflated);
imethod = (MonoMethodInflated *) method;
declaring = imethod->declaring;
sig = method_encode_signature (assembly, mono_method_signature (declaring));
mtoken = mono_image_get_memberref_token (assembly, &method->klass->byval_arg, declaring->name, sig);
if (!mono_method_signature (declaring)->generic_param_count)
return mtoken;
switch (mono_metadata_token_table (mtoken)) {
case MONO_TABLE_MEMBERREF:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODREF;
break;
case MONO_TABLE_METHOD:
mtoken = (mono_metadata_token_index (mtoken) << MONO_METHODDEFORREF_BITS) | MONO_METHODDEFORREF_METHODDEF;
break;
default:
g_assert_not_reached ();
}
sig = encode_generic_method_sig (assembly, mono_method_get_context (method));
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_METHODSPEC_SIZE;
values [MONO_METHODSPEC_METHOD] = mtoken;
values [MONO_METHODSPEC_SIGNATURE] = sig;
}
token = MONO_TOKEN_METHOD_SPEC | table->next_idx;
table->next_idx ++;
return token;
}
static guint32
mono_image_get_methodspec_token (MonoDynamicImage *assembly, MonoMethod *method)
{
MonoMethodInflated *imethod;
guint32 token;
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->handleref, method));
if (token)
return token;
g_assert (method->is_inflated);
imethod = (MonoMethodInflated *) method;
if (mono_method_signature (imethod->declaring)->generic_param_count) {
token = method_encode_methodspec (assembly, method);
} else {
guint32 sig = method_encode_signature (
assembly, mono_method_signature (imethod->declaring));
token = mono_image_get_memberref_token (
assembly, &method->klass->byval_arg, method->name, sig);
}
g_hash_table_insert (assembly->handleref, method, GUINT_TO_POINTER(token));
return token;
}
static guint32
mono_image_get_inflated_method_token (MonoDynamicImage *assembly, MonoMethod *m)
{
MonoMethodInflated *imethod = (MonoMethodInflated *) m;
guint32 sig, token;
sig = method_encode_signature (assembly, mono_method_signature (imethod->declaring));
token = mono_image_get_memberref_token (
assembly, &m->klass->byval_arg, m->name, sig);
return token;
}
static guint32
create_generic_typespec (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb)
{
MonoDynamicTable *table;
MonoClass *klass;
MonoType *type;
guint32 *values;
guint32 token;
SigBuffer buf;
int count, i;
/*
 * We're creating a TypeSpec for the TypeBuilder of a generic type declaration,
 * i.e. what we'd normally use as the generic type in a TypeSpec signature.
 * Because of this, we must not insert it into the `typeref' hash table.
 */
type = mono_reflection_type_get_handle ((MonoReflectionType*)tb);
token = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->typespec, type));
if (token)
return token;
sigbuffer_init (&buf, 32);
g_assert (tb->generic_params);
klass = mono_class_from_mono_type (type);
if (tb->generic_container)
mono_reflection_create_generic_class (tb);
sigbuffer_add_value (&buf, MONO_TYPE_GENERICINST);
g_assert (klass->generic_container);
sigbuffer_add_value (&buf, klass->byval_arg.type);
sigbuffer_add_value (&buf, mono_image_typedef_or_ref_full (assembly, &klass->byval_arg, FALSE));
count = mono_array_length (tb->generic_params);
sigbuffer_add_value (&buf, count);
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gparam;
gparam = mono_array_get (tb->generic_params, MonoReflectionGenericParam *, i);
encode_type (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)gparam), &buf);
}
table = &assembly->tables [MONO_TABLE_TYPESPEC];
if (assembly->save) {
token = sigbuffer_add_to_blob_cached (assembly, &buf);
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_TYPESPEC_SIZE;
values [MONO_TYPESPEC_SIGNATURE] = token;
}
sigbuffer_free (&buf);
token = MONO_TYPEDEFORREF_TYPESPEC | (table->next_idx << MONO_TYPEDEFORREF_BITS);
g_hash_table_insert (assembly->typespec, type, GUINT_TO_POINTER(token));
table->next_idx ++;
return token;
}
/*
* Return a copy of TYPE, adding the custom modifiers in MODREQ and MODOPT.
*/
static MonoType*
add_custom_modifiers (MonoDynamicImage *assembly, MonoType *type, MonoArray *modreq, MonoArray *modopt)
{
int i, count, len, pos;
MonoType *t;
count = 0;
if (modreq)
count += mono_array_length (modreq);
if (modopt)
count += mono_array_length (modopt);
if (count == 0)
return mono_metadata_type_dup (NULL, type);
len = MONO_SIZEOF_TYPE + ((gint32)count) * sizeof (MonoCustomMod);
t = g_malloc (len);
memcpy (t, type, MONO_SIZEOF_TYPE);
t->num_mods = count;
pos = 0;
if (modreq) {
for (i = 0; i < mono_array_length (modreq); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modreq, i);
t->modifiers [pos].required = 1;
t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod);
pos ++;
}
}
if (modopt) {
for (i = 0; i < mono_array_length (modopt); ++i) {
MonoType *mod = mono_type_array_get_and_resolve (modopt, i);
t->modifiers [pos].required = 0;
t->modifiers [pos].token = mono_image_typedef_or_ref (assembly, mod);
pos ++;
}
}
return t;
}
static void
init_type_builder_generics (MonoObject *type)
{
MonoReflectionTypeBuilder *tb;
if (!is_sre_type_builder (mono_object_class (type)))
return;
tb = (MonoReflectionTypeBuilder *)type;
if (tb && tb->generic_container)
mono_reflection_create_generic_class (tb);
}
static guint32
mono_image_get_generic_field_token (MonoDynamicImage *assembly, MonoReflectionFieldBuilder *fb)
{
MonoDynamicTable *table;
MonoClass *klass;
MonoType *custom = NULL, *type;
guint32 *values;
guint32 token, pclass, parent, sig;
gchar *name;
token = GPOINTER_TO_UINT (mono_g_hash_table_lookup (assembly->handleref_managed, fb));
if (token)
return token;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle (fb->typeb));
name = mono_string_to_utf8 (fb->name);
/* FIXME: this is one more layer of ugliness due to how types are created */
init_type_builder_generics (fb->type);
/* fb->type does not include the custom modifiers */
/* FIXME: We should do this in one place when a fieldbuilder is created */
type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
if (fb->modreq || fb->modopt)
type = custom = add_custom_modifiers (assembly, type, fb->modreq, fb->modopt);
sig = fieldref_encode_signature (assembly, NULL, type);
g_free (custom);
parent = create_generic_typespec (assembly, (MonoReflectionTypeBuilder *) fb->typeb);
g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_TYPEDEFORREF_TYPESPEC);
pclass = MONO_MEMBERREF_PARENT_TYPESPEC;
parent >>= MONO_TYPEDEFORREF_BITS;
table = &assembly->tables [MONO_TABLE_MEMBERREF];
if (assembly->save) {
alloc_table (table, table->rows + 1);
values = table->values + table->next_idx * MONO_MEMBERREF_SIZE;
values [MONO_MEMBERREF_CLASS] = pclass | (parent << MONO_MEMBERREF_PARENT_BITS);
values [MONO_MEMBERREF_NAME] = string_heap_insert (&assembly->sheap, name);
values [MONO_MEMBERREF_SIGNATURE] = sig;
}
token = MONO_TOKEN_MEMBER_REF | table->next_idx;
table->next_idx ++;
mono_g_hash_table_insert (assembly->handleref_managed, fb, GUINT_TO_POINTER(token));
g_free (name);
return token;
}
static guint32
mono_reflection_encode_sighelper (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper)
{
SigBuffer buf;
guint32 nargs;
guint32 i, idx;
if (!assembly->save)
return 0;
/* FIXME: this means SignatureHelper.SignatureHelpType.HELPER_METHOD */
g_assert (helper->type == 2);
if (helper->arguments)
nargs = mono_array_length (helper->arguments);
else
nargs = 0;
sigbuffer_init (&buf, 32);
/* Encode calling convention */
/* Change Any to Standard */
if ((helper->call_conv & 0x03) == 0x03)
helper->call_conv = 0x01;
/* explicit_this implies has_this */
if (helper->call_conv & 0x40)
helper->call_conv |= 0x20;
if (helper->call_conv == 0) { /* Unmanaged */
idx = helper->unmanaged_call_conv - 1;
} else {
/* Managed */
idx = helper->call_conv & 0x60; /* has_this + explicit_this */
if (helper->call_conv & 0x02) /* varargs */
idx += 0x05;
}
sigbuffer_add_byte (&buf, idx);
sigbuffer_add_value (&buf, nargs);
encode_reflection_type (assembly, helper->return_type, &buf);
for (i = 0; i < nargs; ++i) {
MonoArray *modreqs = NULL;
MonoArray *modopts = NULL;
MonoReflectionType *pt;
if (helper->modreqs && (i < mono_array_length (helper->modreqs)))
modreqs = mono_array_get (helper->modreqs, MonoArray*, i);
if (helper->modopts && (i < mono_array_length (helper->modopts)))
modopts = mono_array_get (helper->modopts, MonoArray*, i);
encode_custom_modifiers (assembly, modreqs, modopts, &buf);
pt = mono_array_get (helper->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, pt, &buf);
}
idx = sigbuffer_add_to_blob_cached (assembly, &buf);
sigbuffer_free (&buf);
return idx;
}
static guint32
mono_image_get_sighelper_token (MonoDynamicImage *assembly, MonoReflectionSigHelper *helper)
{
guint32 idx;
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [MONO_TABLE_STANDALONESIG];
idx = table->next_idx ++;
table->rows ++;
alloc_table (table, table->rows);
values = table->values + idx * MONO_STAND_ALONE_SIGNATURE_SIZE;
values [MONO_STAND_ALONE_SIGNATURE] =
mono_reflection_encode_sighelper (assembly, helper);
return idx;
}
static int
reflection_cc_to_file (int call_conv)
{
switch (call_conv & 0x3) {
case 0:
case 1: return MONO_CALL_DEFAULT;
case 2: return MONO_CALL_VARARG;
default:
g_assert_not_reached ();
}
return 0;
}
#endif /* !DISABLE_REFLECTION_EMIT */
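/*
 * An ArrayMethod records the memberref token emitted for an array method
 * (typically Get/Set/Address/.ctor), keyed by parent type, name and
 * signature, so the same token can be returned for equivalent requests.
 */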
typedef struct {
MonoType *parent;
MonoMethodSignature *sig;
char *name;
guint32 token;
} ArrayMethod;
#ifndef DISABLE_REFLECTION_EMIT
static guint32
mono_image_get_array_token (MonoDynamicImage *assembly, MonoReflectionArrayMethod *m)
{
guint32 nparams, i;
GList *tmp;
char *name;
MonoMethodSignature *sig;
ArrayMethod *am;
MonoType *mtype;
name = mono_string_to_utf8 (m->name);
nparams = mono_array_length (m->parameters);
sig = g_malloc0 (MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * nparams);
sig->hasthis = 1;
sig->sentinelpos = -1;
sig->call_convention = reflection_cc_to_file (m->call_conv);
sig->param_count = nparams;
sig->ret = m->ret ? mono_reflection_type_get_handle (m->ret) : &mono_defaults.void_class->byval_arg;
mtype = mono_reflection_type_get_handle (m->parent);
for (i = 0; i < nparams; ++i)
sig->params [i] = mono_type_array_get_and_resolve (m->parameters, i);
for (tmp = assembly->array_methods; tmp; tmp = tmp->next) {
am = tmp->data;
if (strcmp (name, am->name) == 0 &&
mono_metadata_type_equal (am->parent, mtype) &&
mono_metadata_signature_equal (am->sig, sig)) {
g_free (name);
g_free (sig);
m->table_idx = am->token & 0xffffff;
return am->token;
}
}
am = g_new0 (ArrayMethod, 1);
am->name = name;
am->sig = sig;
am->parent = mtype;
am->token = mono_image_get_memberref_token (assembly, am->parent, name,
method_encode_signature (assembly, sig));
assembly->array_methods = g_list_prepend (assembly->array_methods, am);
m->table_idx = am->token & 0xffffff;
return am->token;
}
/*
* Insert into the metadata tables all the info about the TypeBuilder tb.
* Data in the tables is inserted in a predefined order, since some tables need to be sorted.
*/
static void
mono_image_get_type_info (MonoDomain *domain, MonoReflectionTypeBuilder *tb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
int i, is_object = 0, is_system = 0;
char *n;
table = &assembly->tables [MONO_TABLE_TYPEDEF];
values = table->values + tb->table_idx * MONO_TYPEDEF_SIZE;
values [MONO_TYPEDEF_FLAGS] = tb->attrs;
n = mono_string_to_utf8 (tb->name);
if (strcmp (n, "Object") == 0)
is_object++;
values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, n);
g_free (n);
n = mono_string_to_utf8 (tb->nspace);
if (strcmp (n, "System") == 0)
is_system++;
values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, n);
g_free (n);
if (tb->parent && !(is_system && is_object) &&
!(tb->attrs & TYPE_ATTRIBUTE_INTERFACE)) { /* interfaces don't have a parent */
values [MONO_TYPEDEF_EXTENDS] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent));
} else {
values [MONO_TYPEDEF_EXTENDS] = 0;
}
values [MONO_TYPEDEF_FIELD_LIST] = assembly->tables [MONO_TABLE_FIELD].next_idx;
values [MONO_TYPEDEF_METHOD_LIST] = assembly->tables [MONO_TABLE_METHOD].next_idx;
/*
 * if we have an explicit or sequential layout, output the data in the
 * ClassLayout table.
 */
if (((tb->attrs & TYPE_ATTRIBUTE_LAYOUT_MASK) != TYPE_ATTRIBUTE_AUTO_LAYOUT) &&
((tb->class_size > 0) || (tb->packing_size > 0))) {
table = &assembly->tables [MONO_TABLE_CLASSLAYOUT];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_CLASS_LAYOUT_SIZE;
values [MONO_CLASS_LAYOUT_PARENT] = tb->table_idx;
values [MONO_CLASS_LAYOUT_CLASS_SIZE] = tb->class_size;
values [MONO_CLASS_LAYOUT_PACKING_SIZE] = tb->packing_size;
}
/* handle interfaces */
if (tb->interfaces) {
table = &assembly->tables [MONO_TABLE_INTERFACEIMPL];
i = table->rows;
table->rows += mono_array_length (tb->interfaces);
alloc_table (table, table->rows);
values = table->values + (i + 1) * MONO_INTERFACEIMPL_SIZE;
for (i = 0; i < mono_array_length (tb->interfaces); ++i) {
MonoReflectionType* iface = (MonoReflectionType*) mono_array_get (tb->interfaces, gpointer, i);
values [MONO_INTERFACEIMPL_CLASS] = tb->table_idx;
values [MONO_INTERFACEIMPL_INTERFACE] = mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (iface));
values += MONO_INTERFACEIMPL_SIZE;
}
}
/* handle fields */
if (tb->fields) {
table = &assembly->tables [MONO_TABLE_FIELD];
table->rows += tb->num_fields;
alloc_table (table, table->rows);
for (i = 0; i < tb->num_fields; ++i)
mono_image_get_field_info (
mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i), assembly);
}
/* handle constructors */
if (tb->ctors) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += mono_array_length (tb->ctors);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (tb->ctors); ++i)
mono_image_get_ctor_info (domain,
mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i), assembly);
}
/* handle methods */
if (tb->methods) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += tb->num_methods;
alloc_table (table, table->rows);
for (i = 0; i < tb->num_methods; ++i)
mono_image_get_method_info (
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i), assembly);
}
/* Do the same with events and properties */
if (tb->events && mono_array_length (tb->events)) {
table = &assembly->tables [MONO_TABLE_EVENT];
table->rows += mono_array_length (tb->events);
alloc_table (table, table->rows);
table = &assembly->tables [MONO_TABLE_EVENTMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_EVENT_MAP_SIZE;
values [MONO_EVENT_MAP_PARENT] = tb->table_idx;
values [MONO_EVENT_MAP_EVENTLIST] = assembly->tables [MONO_TABLE_EVENT].next_idx;
for (i = 0; i < mono_array_length (tb->events); ++i)
mono_image_get_event_info (
mono_array_get (tb->events, MonoReflectionEventBuilder*, i), assembly);
}
if (tb->properties && mono_array_length (tb->properties)) {
table = &assembly->tables [MONO_TABLE_PROPERTY];
table->rows += mono_array_length (tb->properties);
alloc_table (table, table->rows);
table = &assembly->tables [MONO_TABLE_PROPERTYMAP];
table->rows ++;
alloc_table (table, table->rows);
values = table->values + table->rows * MONO_PROPERTY_MAP_SIZE;
values [MONO_PROPERTY_MAP_PARENT] = tb->table_idx;
values [MONO_PROPERTY_MAP_PROPERTY_LIST] = assembly->tables [MONO_TABLE_PROPERTY].next_idx;
for (i = 0; i < mono_array_length (tb->properties); ++i)
mono_image_get_property_info (
mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i), assembly);
}
/* handle generic parameters */
if (tb->generic_params) {
table = &assembly->tables [MONO_TABLE_GENERICPARAM];
table->rows += mono_array_length (tb->generic_params);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (tb->generic_params); ++i) {
guint32 owner = MONO_TYPEORMETHOD_TYPE | (tb->table_idx << MONO_TYPEORMETHOD_BITS);
mono_image_get_generic_param_info (
mono_array_get (tb->generic_params, MonoReflectionGenericParam*, i), owner, assembly);
}
}
mono_image_add_decl_security (assembly,
mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx), tb->permissions);
if (tb->subtypes) {
MonoDynamicTable *ntable;
ntable = &assembly->tables [MONO_TABLE_NESTEDCLASS];
ntable->rows += mono_array_length (tb->subtypes);
alloc_table (ntable, ntable->rows);
values = ntable->values + ntable->next_idx * MONO_NESTED_CLASS_SIZE;
for (i = 0; i < mono_array_length (tb->subtypes); ++i) {
MonoReflectionTypeBuilder *subtype = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i);
values [MONO_NESTED_CLASS_NESTED] = subtype->table_idx;
values [MONO_NESTED_CLASS_ENCLOSING] = tb->table_idx;
/*g_print ("nesting %s (%d) in %s (%d) (rows %d/%d)\n",
mono_string_to_utf8 (subtype->name), subtype->table_idx,
mono_string_to_utf8 (tb->name), tb->table_idx,
ntable->next_idx, ntable->rows);*/
values += MONO_NESTED_CLASS_SIZE;
ntable->next_idx++;
}
}
}
#endif
static void
collect_types (MonoPtrArray *types, MonoReflectionTypeBuilder *type)
{
int i;
mono_ptr_array_append (*types, type);
if (!type->subtypes)
return;
for (i = 0; i < mono_array_length (type->subtypes); ++i) {
MonoReflectionTypeBuilder *subtype = mono_array_get (type->subtypes, MonoReflectionTypeBuilder*, i);
collect_types (types, subtype);
}
}
static gint
compare_types_by_table_idx (MonoReflectionTypeBuilder **type1, MonoReflectionTypeBuilder **type2)
{
if ((*type1)->table_idx < (*type2)->table_idx)
return -1;
else if ((*type1)->table_idx > (*type2)->table_idx)
return 1;
else
return 0;
}
static void
params_add_cattrs (MonoDynamicImage *assembly, MonoArray *pinfo)
{
int i;
if (!pinfo)
return;
for (i = 0; i < mono_array_length (pinfo); ++i) {
MonoReflectionParamBuilder *pb;
pb = mono_array_get (pinfo, MonoReflectionParamBuilder *, i);
if (!pb)
continue;
mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PARAMDEF, pb->cattrs);
}
}
static void
type_add_cattrs (MonoDynamicImage *assembly, MonoReflectionTypeBuilder *tb)
{
int i;
mono_image_add_cattrs (assembly, tb->table_idx, MONO_CUSTOM_ATTR_TYPEDEF, tb->cattrs);
if (tb->fields) {
for (i = 0; i < tb->num_fields; ++i) {
MonoReflectionFieldBuilder* fb;
fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, i);
mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs);
}
}
if (tb->events) {
for (i = 0; i < mono_array_length (tb->events); ++i) {
MonoReflectionEventBuilder* eb;
eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
mono_image_add_cattrs (assembly, eb->table_idx, MONO_CUSTOM_ATTR_EVENT, eb->cattrs);
}
}
if (tb->properties) {
for (i = 0; i < mono_array_length (tb->properties); ++i) {
MonoReflectionPropertyBuilder* pb;
pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i);
mono_image_add_cattrs (assembly, pb->table_idx, MONO_CUSTOM_ATTR_PROPERTY, pb->cattrs);
}
}
if (tb->ctors) {
for (i = 0; i < mono_array_length (tb->ctors); ++i) {
MonoReflectionCtorBuilder* cb;
cb = mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i);
mono_image_add_cattrs (assembly, cb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, cb->cattrs);
params_add_cattrs (assembly, cb->pinfo);
}
}
if (tb->methods) {
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder* mb;
mb = mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs);
params_add_cattrs (assembly, mb->pinfo);
}
}
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i)
type_add_cattrs (assembly, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i));
}
}
static void
module_add_cattrs (MonoDynamicImage *assembly, MonoReflectionModuleBuilder *moduleb)
{
int i;
mono_image_add_cattrs (assembly, moduleb->table_idx, MONO_CUSTOM_ATTR_MODULE, moduleb->cattrs);
if (moduleb->global_methods) {
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) {
MonoReflectionMethodBuilder* mb = mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i);
mono_image_add_cattrs (assembly, mb->table_idx, MONO_CUSTOM_ATTR_METHODDEF, mb->cattrs);
params_add_cattrs (assembly, mb->pinfo);
}
}
if (moduleb->global_fields) {
for (i = 0; i < mono_array_length (moduleb->global_fields); ++i) {
MonoReflectionFieldBuilder *fb = mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i);
mono_image_add_cattrs (assembly, fb->table_idx, MONO_CUSTOM_ATTR_FIELDDEF, fb->cattrs);
}
}
if (moduleb->types) {
for (i = 0; i < moduleb->num_types; ++i)
type_add_cattrs (assembly, mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i));
}
}
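/*
 * mono_image_fill_file_table:
 *
 * Add a FILE table row for MODULE, storing the file name and a
 * length-prefixed SHA1 hash of the file contents in the blob heap.
 */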
static void
mono_image_fill_file_table (MonoDomain *domain, MonoReflectionModule *module, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
char blob_size [6];
guchar hash [20];
char *b = blob_size;
char *dir, *path;
table = &assembly->tables [MONO_TABLE_FILE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_FILE_SIZE;
values [MONO_FILE_FLAGS] = FILE_CONTAINS_METADATA;
values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, module->image->module_name);
if (module->image->dynamic) {
/* This depends on the fact that the main module is emitted last */
dir = mono_string_to_utf8 (((MonoReflectionModuleBuilder*)module)->assemblyb->dir);
path = g_strdup_printf ("%s%c%s", dir, G_DIR_SEPARATOR, module->image->module_name);
} else {
dir = NULL;
path = g_strdup (module->image->name);
}
mono_sha1_get_digest_from_file (path, hash);
g_free (dir);
g_free (path);
mono_metadata_encode_value (20, b, &b); /* length prefix: a SHA1 digest is 20 bytes */
values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
mono_image_add_stream_data (&assembly->blob, (char*)hash, 20);
table->next_idx ++;
}
static void
mono_image_fill_module_table (MonoDomain *domain, MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
int i;
table = &assembly->tables [MONO_TABLE_MODULE];
mb->table_idx = table->next_idx ++;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_NAME] = string_heap_insert_mstring (&assembly->sheap, mb->module.name);
i = mono_image_add_stream_data (&assembly->guid, mono_array_addr (mb->guid, char, 0), 16);
/* the GUID heap index is 1 based and counts 16 byte entries, not byte offsets */
i /= 16;
++i;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_GENERATION] = 0;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_MVID] = i;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENC] = 0;
table->values [mb->table_idx * MONO_MODULE_SIZE + MONO_MODULE_ENCBASE] = 0;
}
static guint32
mono_image_fill_export_table_from_class (MonoDomain *domain, MonoClass *klass,
guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint32 visib, res;
visib = klass->flags & TYPE_ATTRIBUTE_VISIBILITY_MASK;
/* visibility is an enumeration, not a bit mask: only export public and nested public types */
if ((visib != TYPE_ATTRIBUTE_PUBLIC) && (visib != TYPE_ATTRIBUTE_NESTED_PUBLIC))
return 0;
table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = klass->flags;
values [MONO_EXP_TYPE_TYPEDEF] = klass->type_token;
if (klass->nested_in)
values [MONO_EXP_TYPE_IMPLEMENTATION] = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE;
else
values [MONO_EXP_TYPE_IMPLEMENTATION] = (module_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_FILE;
values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
res = table->next_idx;
table->next_idx ++;
/* Emit nested types */
if (klass->ext && klass->ext->nested_classes) {
GList *tmp;
for (tmp = klass->ext->nested_classes; tmp; tmp = tmp->next)
mono_image_fill_export_table_from_class (domain, tmp->data, module_index, table->next_idx - 1, assembly);
}
return res;
}
static void
mono_image_fill_export_table (MonoDomain *domain, MonoReflectionTypeBuilder *tb,
guint32 module_index, guint32 parent_index, MonoDynamicImage *assembly)
{
MonoClass *klass;
guint32 idx, i;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
klass->type_token = mono_metadata_make_token (MONO_TABLE_TYPEDEF, tb->table_idx);
idx = mono_image_fill_export_table_from_class (domain, klass, module_index,
parent_index, assembly);
/*
* Emit nested types
* We need to do this ourselves since klass->nested_classes is not set up.
*/
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i)
mono_image_fill_export_table (domain, mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i), module_index, idx, assembly);
}
}
static void
mono_image_fill_export_table_from_module (MonoDomain *domain, MonoReflectionModule *module,
guint32 module_index, MonoDynamicImage *assembly)
{
MonoImage *image = module->image;
MonoTableInfo *t;
guint32 i;
t = &image->tables [MONO_TABLE_TYPEDEF];
for (i = 0; i < t->rows; ++i) {
MonoClass *klass = mono_class_get (image, mono_metadata_make_token (MONO_TABLE_TYPEDEF, i + 1));
if (klass->flags & TYPE_ATTRIBUTE_PUBLIC)
mono_image_fill_export_table_from_class (domain, klass, module_index, 0, assembly);
}
}
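/*
 * Emit an EXPORTEDTYPE row for the forwarded type @klass: nested types point
 * at their enclosing row, top-level types at the assembly that defines them.
 */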
static void
add_exported_type (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly, MonoClass *klass, guint32 parent_index)
{
MonoDynamicTable *table;
guint32 *values;
guint32 scope, scope_idx, impl, current_idx;
gboolean forwarder = TRUE;
gpointer iter = NULL;
MonoClass *nested;
if (klass->nested_in) {
impl = (parent_index << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_EXP_TYPE;
forwarder = FALSE;
} else {
scope = resolution_scope_from_image (assembly, klass->image);
g_assert ((scope & MONO_RESOLTION_SCOPE_MASK) == MONO_RESOLTION_SCOPE_ASSEMBLYREF);
scope_idx = scope >> MONO_RESOLTION_SCOPE_BITS;
impl = (scope_idx << MONO_IMPLEMENTATION_BITS) + MONO_IMPLEMENTATION_ASSEMBLYREF;
}
table = &assembly->tables [MONO_TABLE_EXPORTEDTYPE];
table->rows++;
alloc_table (table, table->rows);
current_idx = table->next_idx;
values = table->values + current_idx * MONO_EXP_TYPE_SIZE;
values [MONO_EXP_TYPE_FLAGS] = forwarder ? TYPE_ATTRIBUTE_FORWARDER : 0;
values [MONO_EXP_TYPE_TYPEDEF] = 0;
values [MONO_EXP_TYPE_IMPLEMENTATION] = impl;
values [MONO_EXP_TYPE_NAME] = string_heap_insert (&assembly->sheap, klass->name);
values [MONO_EXP_TYPE_NAMESPACE] = string_heap_insert (&assembly->sheap, klass->name_space);
table->next_idx++;
while ((nested = mono_class_get_nested_types (klass, &iter)))
add_exported_type (assemblyb, assembly, nested, current_idx);
}
static void
mono_image_fill_export_table_from_type_forwarders (MonoReflectionAssemblyBuilder *assemblyb, MonoDynamicImage *assembly)
{
MonoClass *klass;
int i;
if (!assemblyb->type_forwarders)
return;
for (i = 0; i < mono_array_length (assemblyb->type_forwarders); ++i) {
MonoReflectionType *t = mono_array_get (assemblyb->type_forwarders, MonoReflectionType *, i);
MonoType *type;
if (!t)
continue;
type = mono_reflection_type_get_handle (t);
g_assert (type);
klass = mono_class_from_mono_type (type);
add_exported_type (assemblyb, assembly, klass, 0);
}
}
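/*
 * align_pointer advances @p to the next 4-byte boundary relative to @base;
 * the metadata format requires the stream headers to be 4-byte aligned.
 */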
#define align_pointer(base,p)\
do {\
guint32 __diff = (unsigned char*)(p)-(unsigned char*)(base);\
if (__diff & 3)\
(p) += 4 - (__diff & 3);\
} while (0)
static int
compare_constants (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_CONSTANT_PARENT] - b_values [MONO_CONSTANT_PARENT];
}
static int
compare_semantics (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
int assoc = a_values [MONO_METHOD_SEMA_ASSOCIATION] - b_values [MONO_METHOD_SEMA_ASSOCIATION];
if (assoc)
return assoc;
return a_values [MONO_METHOD_SEMA_SEMANTICS] - b_values [MONO_METHOD_SEMA_SEMANTICS];
}
static int
compare_custom_attrs (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_CUSTOM_ATTR_PARENT] - b_values [MONO_CUSTOM_ATTR_PARENT];
}
static int
compare_field_marshal (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_FIELD_MARSHAL_PARENT] - b_values [MONO_FIELD_MARSHAL_PARENT];
}
static int
compare_nested (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_NESTED_CLASS_NESTED] - b_values [MONO_NESTED_CLASS_NESTED];
}
static int
compare_genericparam (const void *a, const void *b)
{
const GenericParamTableEntry **a_entry = (const GenericParamTableEntry **) a;
const GenericParamTableEntry **b_entry = (const GenericParamTableEntry **) b;
if ((*b_entry)->owner == (*a_entry)->owner)
return
mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*a_entry)->gparam)) -
mono_type_get_generic_param_num (mono_reflection_type_get_handle ((MonoReflectionType*)(*b_entry)->gparam));
else
return (*a_entry)->owner - (*b_entry)->owner;
}
static int
compare_declsecurity_attrs (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
return a_values [MONO_DECL_SECURITY_PARENT] - b_values [MONO_DECL_SECURITY_PARENT];
}
static int
compare_interface_impl (const void *a, const void *b)
{
const guint32 *a_values = a;
const guint32 *b_values = b;
int klass = a_values [MONO_INTERFACEIMPL_CLASS] - b_values [MONO_INTERFACEIMPL_CLASS];
if (klass)
return klass;
return a_values [MONO_INTERFACEIMPL_INTERFACE] - b_values [MONO_INTERFACEIMPL_INTERFACE];
}
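/* Pad the stream with zeros up to the next 4-byte boundary. */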
static void
pad_heap (MonoDynamicStream *sh)
{
if (sh->index & 3) {
int sz = 4 - (sh->index & 3);
memset (sh->data + sh->index, 0, sz);
sh->index += sz;
}
}
struct StreamDesc {
const char *name;
MonoDynamicStream *stream;
};
/*
* build_compressed_metadata() fills in the blob of data that represents the
* raw metadata as it will be saved in the PE file. The five streams are output
* and the metadata tables are compressed from the in-memory guint32 array
* representation to the on-disk format.
*/
static void
build_compressed_metadata (MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
int i;
guint64 valid_mask = 0;
guint64 sorted_mask;
guint32 heapt_size = 0;
guint32 meta_size = 256; /* allow for header and other stuff */
guint32 table_offset;
guint32 ntables = 0;
guint64 *int64val;
guint32 *int32val;
guint16 *int16val;
MonoImage *meta;
unsigned char *p;
struct StreamDesc stream_desc [5];
qsort (assembly->gen_params->pdata, assembly->gen_params->len, sizeof (gpointer), compare_genericparam);
for (i = 0; i < assembly->gen_params->len; i++){
GenericParamTableEntry *entry = g_ptr_array_index (assembly->gen_params, i);
write_generic_param_entry (assembly, entry);
}
stream_desc [0].name = "#~";
stream_desc [0].stream = &assembly->tstream;
stream_desc [1].name = "#Strings";
stream_desc [1].stream = &assembly->sheap;
stream_desc [2].name = "#US";
stream_desc [2].stream = &assembly->us;
stream_desc [3].name = "#Blob";
stream_desc [3].stream = &assembly->blob;
stream_desc [4].name = "#GUID";
stream_desc [4].stream = &assembly->guid;
/* tables that are sorted */
sorted_mask = ((guint64)1 << MONO_TABLE_CONSTANT) | ((guint64)1 << MONO_TABLE_FIELDMARSHAL)
| ((guint64)1 << MONO_TABLE_METHODSEMANTICS) | ((guint64)1 << MONO_TABLE_CLASSLAYOUT)
| ((guint64)1 << MONO_TABLE_FIELDLAYOUT) | ((guint64)1 << MONO_TABLE_FIELDRVA)
| ((guint64)1 << MONO_TABLE_IMPLMAP) | ((guint64)1 << MONO_TABLE_NESTEDCLASS)
| ((guint64)1 << MONO_TABLE_METHODIMPL) | ((guint64)1 << MONO_TABLE_CUSTOMATTRIBUTE)
| ((guint64)1 << MONO_TABLE_DECLSECURITY) | ((guint64)1 << MONO_TABLE_GENERICPARAM)
| ((guint64)1 << MONO_TABLE_INTERFACEIMPL);
/* Compute table sizes */
/* the MonoImage has already been created in mono_image_basic_init() */
meta = &assembly->image;
/* sizes should be multiple of 4 */
pad_heap (&assembly->blob);
pad_heap (&assembly->guid);
pad_heap (&assembly->sheap);
pad_heap (&assembly->us);
/* Setup the info used by compute_sizes () */
meta->idx_blob_wide = assembly->blob.index >= 65536 ? 1 : 0;
meta->idx_guid_wide = assembly->guid.index >= 65536 ? 1 : 0;
meta->idx_string_wide = assembly->sheap.index >= 65536 ? 1 : 0;
meta_size += assembly->blob.index;
meta_size += assembly->guid.index;
meta_size += assembly->sheap.index;
meta_size += assembly->us.index;
for (i=0; i < MONO_TABLE_NUM; ++i)
meta->tables [i].rows = assembly->tables [i].rows;
for (i = 0; i < MONO_TABLE_NUM; i++){
if (meta->tables [i].rows == 0)
continue;
valid_mask |= (guint64)1 << i;
ntables ++;
meta->tables [i].row_size = mono_metadata_compute_size (
meta, i, &meta->tables [i].size_bitfield);
heapt_size += meta->tables [i].row_size * meta->tables [i].rows;
}
heapt_size += 24; /* #~ header size */
heapt_size += ntables * 4;
/* make multiple of 4 */
heapt_size += 3;
heapt_size &= ~3;
meta_size += heapt_size;
meta->raw_metadata = g_malloc0 (meta_size);
p = (unsigned char*)meta->raw_metadata;
/* the metadata signature */
*p++ = 'B'; *p++ = 'S'; *p++ = 'J'; *p++ = 'B';
/* version numbers and 4 bytes reserved */
int16val = (guint16*)p;
*int16val++ = GUINT16_TO_LE (meta->md_version_major);
*int16val = GUINT16_TO_LE (meta->md_version_minor);
p += 8;
/* version string */
int32val = (guint32*)p;
*int32val = GUINT32_TO_LE ((strlen (meta->version) + 3) & (~3)); /* needs to be multiple of 4 */
p += 4;
memcpy (p, meta->version, strlen (meta->version));
p += GUINT32_FROM_LE (*int32val);
align_pointer (meta->raw_metadata, p);
int16val = (guint16*)p;
*int16val++ = GUINT16_TO_LE (0); /* flags must be 0 */
*int16val = GUINT16_TO_LE (5); /* number of streams */
p += 4;
/*
* write the stream info.
*/
table_offset = (p - (unsigned char*)meta->raw_metadata) + 5 * 8 + 40; /* room needed for stream headers */
table_offset += 3; table_offset &= ~3;
assembly->tstream.index = heapt_size;
for (i = 0; i < 5; ++i) {
int32val = (guint32*)p;
stream_desc [i].stream->offset = table_offset;
*int32val++ = GUINT32_TO_LE (table_offset);
*int32val = GUINT32_TO_LE (stream_desc [i].stream->index);
table_offset += GUINT32_FROM_LE (*int32val);
table_offset += 3; table_offset &= ~3;
p += 8;
strcpy ((char*)p, stream_desc [i].name);
p += strlen (stream_desc [i].name) + 1;
align_pointer (meta->raw_metadata, p);
}
/*
* now copy the data, the table stream header and contents goes first.
*/
g_assert ((p - (unsigned char*)meta->raw_metadata) < assembly->tstream.offset);
p = (guchar*)meta->raw_metadata + assembly->tstream.offset;
int32val = (guint32*)p;
*int32val = GUINT32_TO_LE (0); /* reserved */
p += 4;
*p++ = 2; /* version */
*p++ = 0;
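/* heap-size flags: bit 0 = wide #Strings, bit 1 = wide #GUID, bit 2 = wide #Blob (4 byte indexes) */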
if (meta->idx_string_wide)
*p |= 0x01;
if (meta->idx_guid_wide)
*p |= 0x02;
if (meta->idx_blob_wide)
*p |= 0x04;
++p;
*p++ = 1; /* reserved */
int64val = (guint64*)p;
*int64val++ = GUINT64_TO_LE (valid_mask);
*int64val++ = GUINT64_TO_LE (valid_mask & sorted_mask); /* bitvector of sorted tables */
p += 16;
int32val = (guint32*)p;
for (i = 0; i < MONO_TABLE_NUM; i++){
if (meta->tables [i].rows == 0)
continue;
*int32val++ = GUINT32_TO_LE (meta->tables [i].rows);
}
p = (unsigned char*)int32val;
/* sort the tables that still need sorting */
table = &assembly->tables [MONO_TABLE_CONSTANT];
if (table->rows)
qsort (table->values + MONO_CONSTANT_SIZE, table->rows, sizeof (guint32) * MONO_CONSTANT_SIZE, compare_constants);
table = &assembly->tables [MONO_TABLE_METHODSEMANTICS];
if (table->rows)
qsort (table->values + MONO_METHOD_SEMA_SIZE, table->rows, sizeof (guint32) * MONO_METHOD_SEMA_SIZE, compare_semantics);
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
if (table->rows)
qsort (table->values + MONO_CUSTOM_ATTR_SIZE, table->rows, sizeof (guint32) * MONO_CUSTOM_ATTR_SIZE, compare_custom_attrs);
table = &assembly->tables [MONO_TABLE_FIELDMARSHAL];
if (table->rows)
qsort (table->values + MONO_FIELD_MARSHAL_SIZE, table->rows, sizeof (guint32) * MONO_FIELD_MARSHAL_SIZE, compare_field_marshal);
table = &assembly->tables [MONO_TABLE_NESTEDCLASS];
if (table->rows)
qsort (table->values + MONO_NESTED_CLASS_SIZE, table->rows, sizeof (guint32) * MONO_NESTED_CLASS_SIZE, compare_nested);
/* Section 21.11 DeclSecurity in Partition II doesn't specify this table to be sorted, but the MS implementation requires it */
table = &assembly->tables [MONO_TABLE_DECLSECURITY];
if (table->rows)
qsort (table->values + MONO_DECL_SECURITY_SIZE, table->rows, sizeof (guint32) * MONO_DECL_SECURITY_SIZE, compare_declsecurity_attrs);
table = &assembly->tables [MONO_TABLE_INTERFACEIMPL];
if (table->rows)
qsort (table->values + MONO_INTERFACEIMPL_SIZE, table->rows, sizeof (guint32) * MONO_INTERFACEIMPL_SIZE, compare_interface_impl);
/* compress the tables */
for (i = 0; i < MONO_TABLE_NUM; i++){
int row, col;
guint32 *values;
guint32 bitfield = meta->tables [i].size_bitfield;
if (!meta->tables [i].rows)
continue;
if (assembly->tables [i].columns != mono_metadata_table_count (bitfield))
g_error ("col count mismatch in %d: %d %d", i, assembly->tables [i].columns, mono_metadata_table_count (bitfield));
meta->tables [i].base = (char*)p;
for (row = 1; row <= meta->tables [i].rows; ++row) {
values = assembly->tables [i].values + row * assembly->tables [i].columns;
for (col = 0; col < assembly->tables [i].columns; ++col) {
switch (mono_metadata_table_size (bitfield, col)) {
case 1:
*p++ = values [col];
break;
case 2:
*p++ = values [col] & 0xff;
*p++ = (values [col] >> 8) & 0xff;
break;
case 4:
*p++ = values [col] & 0xff;
*p++ = (values [col] >> 8) & 0xff;
*p++ = (values [col] >> 16) & 0xff;
*p++ = (values [col] >> 24) & 0xff;
break;
default:
g_assert_not_reached ();
}
}
}
g_assert ((p - (const unsigned char*)meta->tables [i].base) == (meta->tables [i].rows * meta->tables [i].row_size));
}
g_assert (assembly->guid.offset + assembly->guid.index < meta_size);
memcpy (meta->raw_metadata + assembly->sheap.offset, assembly->sheap.data, assembly->sheap.index);
memcpy (meta->raw_metadata + assembly->us.offset, assembly->us.data, assembly->us.index);
memcpy (meta->raw_metadata + assembly->blob.offset, assembly->blob.data, assembly->blob.index);
memcpy (meta->raw_metadata + assembly->guid.offset, assembly->guid.data, assembly->guid.index);
assembly->meta_size = assembly->guid.offset + assembly->guid.index;
}
/*
* Some tables in metadata need to be sorted according to some criteria, but
* when methods and fields are first created with reflection, they may be assigned a token
* that doesn't correspond to the final token they will get assigned after the sorting.
* ILGenerator.cs keeps a fixup table that maps the position of tokens in the IL code stream
* to the reflection objects that represent them. Once all the tables are set up, the
* reflection objects will contain the correct table index. fixup_method() fixes up the
* tokens for the method with ILGenerator @ilgen.
*/
static void
fixup_method (MonoReflectionILGen *ilgen, gpointer value, MonoDynamicImage *assembly)
{
guint32 code_idx = GPOINTER_TO_UINT (value);
MonoReflectionILTokenInfo *iltoken;
MonoReflectionFieldBuilder *field;
MonoReflectionCtorBuilder *ctor;
MonoReflectionMethodBuilder *method;
MonoReflectionTypeBuilder *tb;
MonoReflectionArrayMethod *am;
guint32 i, idx = 0;
unsigned char *target;
for (i = 0; i < ilgen->num_token_fixups; ++i) {
iltoken = (MonoReflectionILTokenInfo *)mono_array_addr_with_size (ilgen->token_fixups, sizeof (MonoReflectionILTokenInfo), i);
target = (guchar*)assembly->code.data + code_idx + iltoken->code_pos;
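/* tokens are stored little-endian: bytes 0-2 hold the row index, byte 3 the table id */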
switch (target [3]) {
case MONO_TABLE_FIELD:
if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
field = (MonoReflectionFieldBuilder *)iltoken->member;
idx = field->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->field_to_table_idx, f));
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_METHOD:
if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
method = (MonoReflectionMethodBuilder *)iltoken->member;
idx = method->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
ctor = (MonoReflectionCtorBuilder *)iltoken->member;
idx = ctor->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_TYPEDEF:
if (strcmp (iltoken->member->vtable->klass->name, "TypeBuilder"))
g_assert_not_reached ();
tb = (MonoReflectionTypeBuilder *)iltoken->member;
idx = tb->table_idx;
break;
case MONO_TABLE_MEMBERREF:
if (!strcmp (iltoken->member->vtable->klass->name, "MonoArrayMethod")) {
am = (MonoReflectionArrayMethod*)iltoken->member;
idx = am->table_idx;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoCMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod") ||
!strcmp (iltoken->member->vtable->klass->name, "MonoGenericCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
g_assert (m->klass->generic_class || m->klass->generic_container);
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MonoField")) {
MonoClassField *f = ((MonoReflectionField*)iltoken->member)->field;
g_assert (is_field_on_inst (f));
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder") ||
!strcmp (iltoken->member->vtable->klass->name, "ConstructorBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "FieldOnTypeBuilderInst")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "ConstructorOnTypeBuilderInst")) {
continue;
} else {
g_assert_not_reached ();
}
break;
case MONO_TABLE_METHODSPEC:
if (!strcmp (iltoken->member->vtable->klass->name, "MonoGenericMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)iltoken->member)->method;
g_assert (mono_method_signature (m)->generic_param_count);
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodBuilder")) {
continue;
} else if (!strcmp (iltoken->member->vtable->klass->name, "MethodOnTypeBuilderInst")) {
continue;
} else {
g_assert_not_reached ();
}
break;
default:
g_error ("got unexpected table 0x%02x in fixup", target [3]);
}
target [0] = idx & 0xff;
target [1] = (idx >> 8) & 0xff;
target [2] = (idx >> 16) & 0xff;
}
}
/*
* fixup_cattrs:
*
* The CUSTOM_ATTRIBUTE table might contain METHODDEF tokens whose final
* value is not known when the table is emitted.
*/
static void
fixup_cattrs (MonoDynamicImage *assembly)
{
MonoDynamicTable *table;
guint32 *values;
guint32 type, i, idx, token;
MonoObject *ctor;
table = &assembly->tables [MONO_TABLE_CUSTOMATTRIBUTE];
for (i = 0; i < table->rows; ++i) {
values = table->values + ((i + 1) * MONO_CUSTOM_ATTR_SIZE);
type = values [MONO_CUSTOM_ATTR_TYPE];
if ((type & MONO_CUSTOM_ATTR_TYPE_MASK) == MONO_CUSTOM_ATTR_TYPE_METHODDEF) {
idx = type >> MONO_CUSTOM_ATTR_TYPE_BITS;
token = mono_metadata_make_token (MONO_TABLE_METHOD, idx);
ctor = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
g_assert (ctor);
if (!strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
MonoMethod *m = ((MonoReflectionMethod*)ctor)->method;
idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, m));
values [MONO_CUSTOM_ATTR_TYPE] = (idx << MONO_CUSTOM_ATTR_TYPE_BITS) | MONO_CUSTOM_ATTR_TYPE_METHODDEF;
}
}
}
}
static void
assembly_add_resource_manifest (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc, guint32 implementation)
{
MonoDynamicTable *table;
guint32 *values;
table = &assembly->tables [MONO_TABLE_MANIFESTRESOURCE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_MANIFEST_SIZE;
values [MONO_MANIFEST_OFFSET] = rsrc->offset;
values [MONO_MANIFEST_FLAGS] = rsrc->attrs;
values [MONO_MANIFEST_NAME] = string_heap_insert_mstring (&assembly->sheap, rsrc->name);
values [MONO_MANIFEST_IMPLEMENTATION] = implementation;
table->next_idx++;
}
static void
assembly_add_resource (MonoReflectionModuleBuilder *mb, MonoDynamicImage *assembly, MonoReflectionResource *rsrc)
{
MonoDynamicTable *table;
guint32 *values;
char blob_size [6];
guchar hash [20];
char *b = blob_size;
char *name, *sname;
guint32 idx, offset;
if (rsrc->filename) {
name = mono_string_to_utf8 (rsrc->filename);
sname = g_path_get_basename (name);
table = &assembly->tables [MONO_TABLE_FILE];
table->rows++;
alloc_table (table, table->rows);
values = table->values + table->next_idx * MONO_FILE_SIZE;
values [MONO_FILE_FLAGS] = FILE_CONTAINS_NO_METADATA;
values [MONO_FILE_NAME] = string_heap_insert (&assembly->sheap, sname);
g_free (sname);
mono_sha1_get_digest_from_file (name, hash);
mono_metadata_encode_value (20, b, &b);
values [MONO_FILE_HASH_VALUE] = mono_image_add_stream_data (&assembly->blob, blob_size, b-blob_size);
mono_image_add_stream_data (&assembly->blob, (char*)hash, 20);
g_free (name);
idx = table->next_idx++;
rsrc->offset = 0;
idx = MONO_IMPLEMENTATION_FILE | (idx << MONO_IMPLEMENTATION_BITS);
} else {
char sizebuf [4];
char *data;
guint len;
if (rsrc->data) {
data = mono_array_addr (rsrc->data, char, 0);
len = mono_array_length (rsrc->data);
} else {
data = NULL;
len = 0;
}
offset = len;
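/* embedded resources are prefixed with their byte length as a little-endian guint32 */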
sizebuf [0] = offset; sizebuf [1] = offset >> 8;
sizebuf [2] = offset >> 16; sizebuf [3] = offset >> 24;
rsrc->offset = mono_image_add_stream_data (&assembly->resources, sizebuf, 4);
mono_image_add_stream_data (&assembly->resources, data, len);
if (!mb->is_main)
/*
* The entry should be emitted into the MANIFESTRESOURCE table of
* the main module, but that needs to reference the FILE table
* which isn't emitted yet.
*/
return;
else
idx = 0;
}
assembly_add_resource_manifest (mb, assembly, rsrc, idx);
}
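/*
 * Parse up to four dot-separated version components (major.minor.build.revision)
 * from @version into the ASSEMBLY table row at @values; missing components stay 0.
 */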
static void
set_version_from_string (MonoString *version, guint32 *values)
{
gchar *ver, *p, *str;
guint32 i;
values [MONO_ASSEMBLY_MAJOR_VERSION] = 0;
values [MONO_ASSEMBLY_MINOR_VERSION] = 0;
values [MONO_ASSEMBLY_REV_NUMBER] = 0;
values [MONO_ASSEMBLY_BUILD_NUMBER] = 0;
if (!version)
return;
ver = str = mono_string_to_utf8 (version);
for (i = 0; i < 4; ++i) {
values [MONO_ASSEMBLY_MAJOR_VERSION + i] = strtol (ver, &p, 10);
switch (*p) {
case '.':
p++;
break;
case '*':
/* handle Revision and Build */
p++;
break;
}
ver = p;
}
g_free (str);
}
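/*
 * Store @pkey in the blob heap and return its blob index; also cache the raw
 * key bytes and compute the space to reserve for the strong name signature.
 */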
static guint32
load_public_key (MonoArray *pkey, MonoDynamicImage *assembly) {
gsize len;
guint32 token = 0;
char blob_size [6];
char *b = blob_size;
if (!pkey)
return token;
len = mono_array_length (pkey);
mono_metadata_encode_value (len, b, &b);
token = mono_image_add_stream_data (&assembly->blob, blob_size, b - blob_size);
mono_image_add_stream_data (&assembly->blob, mono_array_addr (pkey, char, 0), len);
assembly->public_key = g_malloc (len);
memcpy (assembly->public_key, mono_array_addr (pkey, char, 0), len);
assembly->public_key_len = len;
/* Special case: check for ECMA key (16 bytes) */
if ((len == MONO_ECMA_KEY_LENGTH) && mono_is_ecma_key (mono_array_addr (pkey, char, 0), len)) {
/* In this case we must reserve 128 bytes (1024 bits) for the signature */
assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH;
} else if (len >= MONO_PUBLIC_KEY_HEADER_LENGTH + MONO_MINIMUM_PUBLIC_KEY_LENGTH) {
/* minimum key size (in 2.0) is 384 bits */
assembly->strong_name_size = len - MONO_PUBLIC_KEY_HEADER_LENGTH;
} else {
/* FIXME - verifier */
g_warning ("Invalid public key length: %d bits (total: %d)", (int)MONO_PUBLIC_KEY_BIT_SIZE (len), (int)len);
assembly->strong_name_size = MONO_DEFAULT_PUBLIC_KEY_LENGTH; /* to be safe */
}
assembly->strong_name = g_malloc0 (assembly->strong_name_size);
return token;
}
static void
mono_image_emit_manifest (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicTable *table;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDomain *domain;
guint32 *values;
int i;
guint32 module_index;
assemblyb = moduleb->assemblyb;
assembly = moduleb->dynamic_image;
domain = mono_object_domain (assemblyb);
/* Emit ASSEMBLY table */
table = &assembly->tables [MONO_TABLE_ASSEMBLY];
alloc_table (table, 1);
values = table->values + MONO_ASSEMBLY_SIZE;
values [MONO_ASSEMBLY_HASH_ALG] = assemblyb->algid? assemblyb->algid: ASSEMBLY_HASH_SHA1;
values [MONO_ASSEMBLY_NAME] = string_heap_insert_mstring (&assembly->sheap, assemblyb->name);
if (assemblyb->culture) {
values [MONO_ASSEMBLY_CULTURE] = string_heap_insert_mstring (&assembly->sheap, assemblyb->culture);
} else {
values [MONO_ASSEMBLY_CULTURE] = string_heap_insert (&assembly->sheap, "");
}
values [MONO_ASSEMBLY_PUBLIC_KEY] = load_public_key (assemblyb->public_key, assembly);
values [MONO_ASSEMBLY_FLAGS] = assemblyb->flags;
set_version_from_string (assemblyb->version, values);
/* Emit FILE + EXPORTED_TYPE table */
module_index = 0;
for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
int j;
MonoReflectionModuleBuilder *file_module =
mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
if (file_module != moduleb) {
mono_image_fill_file_table (domain, (MonoReflectionModule*)file_module, assembly);
module_index ++;
if (file_module->types) {
for (j = 0; j < file_module->num_types; ++j) {
MonoReflectionTypeBuilder *tb = mono_array_get (file_module->types, MonoReflectionTypeBuilder*, j);
mono_image_fill_export_table (domain, tb, module_index, 0, assembly);
}
}
}
}
if (assemblyb->loaded_modules) {
for (i = 0; i < mono_array_length (assemblyb->loaded_modules); ++i) {
MonoReflectionModule *file_module =
mono_array_get (assemblyb->loaded_modules, MonoReflectionModule*, i);
mono_image_fill_file_table (domain, file_module, assembly);
module_index ++;
mono_image_fill_export_table_from_module (domain, file_module, module_index, assembly);
}
}
if (assemblyb->type_forwarders)
mono_image_fill_export_table_from_type_forwarders (assemblyb, assembly);
/* Emit MANIFESTRESOURCE table */
module_index = 0;
for (i = 0; i < mono_array_length (assemblyb->modules); ++i) {
int j;
MonoReflectionModuleBuilder *file_module =
mono_array_get (assemblyb->modules, MonoReflectionModuleBuilder*, i);
/* The table for the main module is emitted later */
if (file_module != moduleb) {
module_index ++;
if (file_module->resources) {
int len = mono_array_length (file_module->resources);
for (j = 0; j < len; ++j) {
MonoReflectionResource* res = (MonoReflectionResource*)mono_array_addr (file_module->resources, MonoReflectionResource, j);
assembly_add_resource_manifest (file_module, assembly, res, MONO_IMPLEMENTATION_FILE | (module_index << MONO_IMPLEMENTATION_BITS));
}
}
}
}
}
#ifndef DISABLE_REFLECTION_EMIT_SAVE
/*
* mono_image_build_metadata() fills in all the metadata tables needed for
* the modulebuilder @moduleb.
* At the end of the process, method and field tokens are fixed up and the
* on-disk compressed metadata representation is created.
*/
void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicTable *table;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDomain *domain;
MonoPtrArray types;
guint32 *values;
int i, j;
assemblyb = moduleb->assemblyb;
assembly = moduleb->dynamic_image;
domain = mono_object_domain (assemblyb);
if (assembly->text_rva)
return;
assembly->text_rva = START_TEXT_RVA;
if (moduleb->is_main) {
mono_image_emit_manifest (moduleb);
}
table = &assembly->tables [MONO_TABLE_TYPEDEF];
table->rows = 1; /* .<Module> */
table->next_idx++;
alloc_table (table, table->rows);
/*
* Set the first entry.
*/
values = table->values + table->columns;
values [MONO_TYPEDEF_FLAGS] = 0;
values [MONO_TYPEDEF_NAME] = string_heap_insert (&assembly->sheap, "<Module>");
values [MONO_TYPEDEF_NAMESPACE] = string_heap_insert (&assembly->sheap, "");
values [MONO_TYPEDEF_EXTENDS] = 0;
values [MONO_TYPEDEF_FIELD_LIST] = 1;
values [MONO_TYPEDEF_METHOD_LIST] = 1;
/*
* handle global methods
* FIXME: test what to do when global methods are defined in multiple modules.
*/
if (moduleb->global_methods) {
table = &assembly->tables [MONO_TABLE_METHOD];
table->rows += mono_array_length (moduleb->global_methods);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i)
mono_image_get_method_info (
mono_array_get (moduleb->global_methods, MonoReflectionMethodBuilder*, i), assembly);
}
if (moduleb->global_fields) {
table = &assembly->tables [MONO_TABLE_FIELD];
table->rows += mono_array_length (moduleb->global_fields);
alloc_table (table, table->rows);
for (i = 0; i < mono_array_length (moduleb->global_fields); ++i)
mono_image_get_field_info (
mono_array_get (moduleb->global_fields, MonoReflectionFieldBuilder*, i), assembly);
}
table = &assembly->tables [MONO_TABLE_MODULE];
alloc_table (table, 1);
mono_image_fill_module_table (domain, moduleb, assembly);
/* Collect all types into a list sorted by their table_idx */
mono_ptr_array_init (types, moduleb->num_types);
if (moduleb->types)
for (i = 0; i < moduleb->num_types; ++i) {
MonoReflectionTypeBuilder *type = mono_array_get (moduleb->types, MonoReflectionTypeBuilder*, i);
collect_types (&types, type);
}
mono_ptr_array_sort (types, (gpointer)compare_types_by_table_idx);
table = &assembly->tables [MONO_TABLE_TYPEDEF];
table->rows += mono_ptr_array_size (types);
alloc_table (table, table->rows);
/*
* Emit type names + namespaces at one place inside the string heap,
* so load_class_names () needs to touch fewer pages.
*/
for (i = 0; i < mono_ptr_array_size (types); ++i) {
MonoReflectionTypeBuilder *tb = mono_ptr_array_get (types, i);
string_heap_insert_mstring (&assembly->sheap, tb->nspace);
}
for (i = 0; i < mono_ptr_array_size (types); ++i) {
MonoReflectionTypeBuilder *tb = mono_ptr_array_get (types, i);
string_heap_insert_mstring (&assembly->sheap, tb->name);
}
for (i = 0; i < mono_ptr_array_size (types); ++i) {
MonoReflectionTypeBuilder *type = mono_ptr_array_get (types, i);
mono_image_get_type_info (domain, type, assembly);
}
/*
* table->rows is already set above and in mono_image_fill_module_table.
*/
/* add all the custom attributes at the end, once all the indexes are stable */
mono_image_add_cattrs (assembly, 1, MONO_CUSTOM_ATTR_ASSEMBLY, assemblyb->cattrs);
/* CAS assembly permissions */
if (assemblyb->permissions_minimum)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_minimum);
if (assemblyb->permissions_optional)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_optional);
if (assemblyb->permissions_refused)
mono_image_add_decl_security (assembly, mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1), assemblyb->permissions_refused);
module_add_cattrs (assembly, moduleb);
/* fixup tokens */
mono_g_hash_table_foreach (assembly->token_fixups, (GHFunc)fixup_method, assembly);
/* Create the MethodImpl table. We do this after emitting all methods so we already know
* the final tokens and don't need another fixup pass. */
if (moduleb->global_methods) {
for (i = 0; i < mono_array_length (moduleb->global_methods); ++i) {
MonoReflectionMethodBuilder *mb = mono_array_get (
moduleb->global_methods, MonoReflectionMethodBuilder*, i);
mono_image_add_methodimpl (assembly, mb);
}
}
for (i = 0; i < mono_ptr_array_size (types); ++i) {
MonoReflectionTypeBuilder *type = mono_ptr_array_get (types, i);
if (type->methods) {
for (j = 0; j < type->num_methods; ++j) {
MonoReflectionMethodBuilder *mb = mono_array_get (
type->methods, MonoReflectionMethodBuilder*, j);
mono_image_add_methodimpl (assembly, mb);
}
}
}
mono_ptr_array_destroy (types);
fixup_cattrs (assembly);
}
#else /* DISABLE_REFLECTION_EMIT_SAVE */
void
mono_image_build_metadata (MonoReflectionModuleBuilder *moduleb)
{
g_error ("This mono runtime was configured with --enable-minimal=reflection_emit_save, so saving of dynamic assemblies is not supported.");
}
#endif /* DISABLE_REFLECTION_EMIT_SAVE */
typedef struct {
guint32 import_lookup_table;
guint32 timestamp;
guint32 forwarder;
guint32 name_rva;
guint32 import_address_table_rva;
} MonoIDT;
typedef struct {
guint32 name_rva;
guint32 flags;
} MonoILT;
#ifndef DISABLE_REFLECTION_EMIT
/*
* mono_image_insert_string:
* @module: module builder object
* @str: a string
*
* Insert @str into the user string stream of @module.
*/
guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
MonoDynamicImage *assembly;
guint32 idx;
char buf [16];
char *b = buf;
MONO_ARCH_SAVE_REGS;
if (!module->dynamic_image)
mono_image_module_basic_init (module);
assembly = module->dynamic_image;
if (assembly->save) {
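/* the entry size is the UTF-16 byte length plus one trailing flag byte;
 * str->length * 2 is even, so OR-ing in 1 accounts for that byte */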
mono_metadata_encode_value (1 | (str->length * 2), b, &b);
idx = mono_image_add_stream_data (&assembly->us, buf, b-buf);
#if G_BYTE_ORDER != G_LITTLE_ENDIAN
{
char *swapped = g_malloc (2 * mono_string_length (str));
const char *p = (const char*)mono_string_chars (str);
swap_with_size (swapped, p, 2, mono_string_length (str));
mono_image_add_stream_data (&assembly->us, swapped, str->length * 2);
g_free (swapped);
}
#else
mono_image_add_stream_data (&assembly->us, (const char*)mono_string_chars (str), str->length * 2);
#endif
mono_image_add_stream_data (&assembly->us, "", 1);
} else {
idx = assembly->us.index ++;
}
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (MONO_TOKEN_STRING | idx), str);
return MONO_TOKEN_STRING | idx;
}
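/*
 * Create a MEMBERREF token for a vararg call site: build a signature that
 * appends @opt_param_types after the declared parameters, with the sentinel
 * at the original parameter count, and intern it with the method name.
 */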
guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
MonoClass *klass;
guint32 token = 0;
MonoMethodSignature *sig;
klass = obj->vtable->klass;
if (strcmp (klass->name, "MonoMethod") == 0) {
MonoMethod *method = ((MonoReflectionMethod *)obj)->method;
MonoMethodSignature *old;
guint32 sig_token, parent;
int nargs, i;
g_assert (opt_param_types && (mono_method_signature (method)->sentinelpos >= 0));
nargs = mono_array_length (opt_param_types);
old = mono_method_signature (method);
sig = mono_metadata_signature_alloc ( &assembly->image, old->param_count + nargs);
sig->hasthis = old->hasthis;
sig->explicit_this = old->explicit_this;
sig->call_convention = old->call_convention;
sig->generic_param_count = old->generic_param_count;
sig->param_count = old->param_count + nargs;
sig->sentinelpos = old->param_count;
sig->ret = old->ret;
for (i = 0; i < old->param_count; i++)
sig->params [i] = old->params [i];
for (i = 0; i < nargs; i++) {
MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i);
sig->params [old->param_count + i] = mono_reflection_type_get_handle (rt);
}
parent = mono_image_typedef_or_ref (assembly, &method->klass->byval_arg);
g_assert ((parent & MONO_TYPEDEFORREF_MASK) == MONO_MEMBERREF_PARENT_TYPEREF);
parent >>= MONO_TYPEDEFORREF_BITS;
parent <<= MONO_MEMBERREF_PARENT_BITS;
parent |= MONO_MEMBERREF_PARENT_TYPEREF;
sig_token = method_encode_signature (assembly, sig);
token = mono_image_get_varargs_method_token (assembly, parent, method->name, sig_token);
} else if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
ReflectionMethodBuilder rmb;
guint32 parent, sig_token;
int nopt_args, nparams, ngparams, i;
char *name;
reflection_methodbuilder_from_method_builder (&rmb, mb);
rmb.opt_types = opt_param_types;
nopt_args = mono_array_length (opt_param_types);
nparams = rmb.parameters ? mono_array_length (rmb.parameters): 0;
ngparams = rmb.generic_params ? mono_array_length (rmb.generic_params): 0;
sig = mono_metadata_signature_alloc (&assembly->image, nparams + nopt_args);
sig->hasthis = !(rmb.attrs & METHOD_ATTRIBUTE_STATIC);
sig->explicit_this = (rmb.call_conv & 0x40) == 0x40;
sig->call_convention = rmb.call_conv;
sig->generic_param_count = ngparams;
sig->param_count = nparams + nopt_args;
sig->sentinelpos = nparams;
sig->ret = mono_reflection_type_get_handle (rmb.rtype);
for (i = 0; i < nparams; i++) {
MonoReflectionType *rt = mono_array_get (rmb.parameters, MonoReflectionType *, i);
sig->params [i] = mono_reflection_type_get_handle (rt);
}
for (i = 0; i < nopt_args; i++) {
MonoReflectionType *rt = mono_array_get (opt_param_types, MonoReflectionType *, i);
sig->params [nparams + i] = mono_reflection_type_get_handle (rt);
}
sig_token = method_builder_encode_signature (assembly, &rmb);
parent = mono_image_create_token (assembly, obj, TRUE, TRUE);
g_assert (mono_metadata_token_table (parent) == MONO_TABLE_METHOD);
parent = mono_metadata_token_index (parent) << MONO_MEMBERREF_PARENT_BITS;
parent |= MONO_MEMBERREF_PARENT_METHODDEF;
name = mono_string_to_utf8 (rmb.name);
token = mono_image_get_varargs_method_token (
assembly, parent, name, sig_token);
g_free (name);
} else {
g_error ("requested method token for %s\n", klass->name);
}
g_hash_table_insert (assembly->vararg_aux_hash, GUINT_TO_POINTER (token), sig);
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
return token;
}
/*
* mono_image_create_token:
* @assembly: a dynamic assembly
* @obj: the reflection object (MemberInfo) to create a token for
* @register_token: Whether to register the token in the assembly->tokens hash.
*
* Get a token to insert in the IL code stream for the given MemberInfo.
* The metadata emission routines need to pass FALSE as REGISTER_TOKEN, since by that
* time the table_idx-es have been recomputed, so registering the token would overwrite
* an existing entry.
*/
guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj,
gboolean create_open_instance, gboolean register_token)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
/* Check for user defined reflection objects */
/* TypeDelegator is the only corlib type which doesn't look like a MonoReflectionType */
if (klass->image != mono_defaults.corlib || (strcmp (klass->name, "TypeDelegator") == 0))
mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported"));
if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params && !mb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_methodbuilder_token (assembly, mb, create_open_instance);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
if (tb->module->dynamic_image == assembly && !tb->generic_params)
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
else
token = mono_image_get_ctorbuilder_token (assembly, mb);
/*g_print ("got token 0x%08x for %s\n", token, mono_string_to_utf8 (mb->name));*/
} else if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)fb->typeb;
if (tb->generic_params) {
token = mono_image_get_generic_field_token (assembly, fb);
} else {
if ((tb->module->dynamic_image == assembly)) {
token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
} else {
token = mono_image_get_fieldref_token (assembly, (MonoObject*)fb, fb->handle);
}
}
} else if (strcmp (klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
if (create_open_instance && tb->generic_params) {
MonoType *type;
init_type_builder_generics (obj);
type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_image_typedef_or_ref_full (assembly, type, TRUE);
token = mono_metadata_token_from_dor (token);
} else {
token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
}
} else if (strcmp (klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
MonoClass *mc = mono_class_from_mono_type (type);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref_full (assembly, type, mc->generic_container == NULL || create_open_instance));
} else if (strcmp (klass->name, "GenericTypeParameterBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoGenericClass") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
strcmp (klass->name, "MonoMethod") == 0 ||
strcmp (klass->name, "MonoGenericMethod") == 0 ||
strcmp (klass->name, "MonoGenericCMethod") == 0) {
MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
if (m->method->is_inflated) {
if (create_open_instance)
token = mono_image_get_methodspec_token (assembly, m->method);
else
token = mono_image_get_inflated_method_token (assembly, m->method);
} else if ((m->method->klass->image == &assembly->image) &&
!m->method->klass->generic_class) {
static guint32 method_table_idx = 0xffffff;
if (m->method->klass->wastypebuilder) {
/* we use the same token as the one that was assigned
* to the Methodbuilder.
* FIXME: do the equivalent for Fields.
*/
token = m->method->token;
} else {
/*
* Each token should have a unique index, but the indexes are
* assigned by managed code, so we don't know about them. An
* easy solution is to count backwards...
*/
method_table_idx --;
token = MONO_TOKEN_METHOD_DEF | method_table_idx;
}
} else {
token = mono_image_get_methodref_token (assembly, m->method, create_open_instance);
}
/*g_print ("got token 0x%08x for %s\n", token, m->method->name);*/
} else if (strcmp (klass->name, "MonoField") == 0) {
MonoReflectionField *f = (MonoReflectionField *)obj;
if ((f->field->parent->image == &assembly->image) && !is_field_on_inst (f->field)) {
static guint32 field_table_idx = 0xffffff;
field_table_idx --;
token = MONO_TOKEN_FIELD_DEF | field_table_idx;
} else {
token = mono_image_get_fieldref_token (assembly, (MonoObject*)f, f->field);
}
/*g_print ("got token 0x%08x for %s\n", token, f->field->name);*/
} else if (strcmp (klass->name, "MonoArrayMethod") == 0) {
MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod *)obj;
token = mono_image_get_array_token (assembly, m);
} else if (strcmp (klass->name, "SignatureHelper") == 0) {
MonoReflectionSigHelper *s = (MonoReflectionSigHelper*)obj;
token = MONO_TOKEN_SIGNATURE | mono_image_get_sighelper_token (assembly, s);
} else if (strcmp (klass->name, "EnumBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, type));
} else if (strcmp (klass->name, "FieldOnTypeBuilderInst") == 0) {
MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
token = mono_image_get_field_on_inst_token (assembly, f);
} else if (strcmp (klass->name, "ConstructorOnTypeBuilderInst") == 0) {
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
token = mono_image_get_ctor_on_inst_token (assembly, c, create_open_instance);
} else if (strcmp (klass->name, "MethodOnTypeBuilderInst") == 0) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
token = mono_image_get_method_on_inst_token (assembly, m, create_open_instance);
} else if (is_sre_array (klass) || is_sre_byref (klass) || is_sre_pointer (klass)) {
MonoReflectionType *type = (MonoReflectionType *)obj;
token = mono_metadata_token_from_dor (
mono_image_typedef_or_ref (assembly, mono_reflection_type_get_handle (type)));
} else {
g_error ("requested token for %s\n", klass->name);
}
if (register_token)
mono_image_register_token (assembly, token, obj);
return token;
}
/*
* mono_image_register_token:
*
* Register the TOKEN->OBJ mapping in the mapping table in ASSEMBLY. This is required for
* the Module.ResolveXXXToken () methods to work.
*/
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
MonoObject *prev = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
if (prev) {
/* There could be multiple MethodInfo objects with the same token */
//g_assert (prev == obj);
} else {
mono_g_hash_table_insert (assembly->tokens, GUINT_TO_POINTER (token), obj);
}
}
static MonoDynamicImage*
create_dynamic_mono_image (MonoDynamicAssembly *assembly, char *assembly_name, char *module_name)
{
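/* 0xff 0x25 is the x86 indirect jmp opcode: the native entry point stub
 * jumps through the IAT to _CorExeMain in mscoree.dll */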
static const guchar entrycode [16] = {0xff, 0x25, 0};
MonoDynamicImage *image;
int i;
const char *version;
if (!strcmp (mono_get_runtime_info ()->framework_version, "2.1"))
version = "v2.0.50727"; /* HACK: SL 2 enforces the .net 2 metadata version */
else
version = mono_get_runtime_info ()->runtime_version;
#if HAVE_BOEHM_GC
/* The MonoGHashTable's need GC tracking */
image = GC_MALLOC (sizeof (MonoDynamicImage));
#else
image = g_new0 (MonoDynamicImage, 1);
#endif
mono_profiler_module_event (&image->image, MONO_PROFILE_START_LOAD);
/*g_print ("created image %p\n", image);*/
/* keep in sync with image.c */
image->image.name = assembly_name;
image->image.assembly_name = image->image.name; /* they may be different */
image->image.module_name = module_name;
image->image.version = g_strdup (version);
image->image.md_version_major = 1;
image->image.md_version_minor = 1;
image->image.dynamic = TRUE;
image->image.references = g_new0 (MonoAssembly*, 1);
image->image.references [0] = NULL;
mono_image_init (&image->image);
image->token_fixups = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
image->method_to_table_idx = g_hash_table_new (NULL, NULL);
image->field_to_table_idx = g_hash_table_new (NULL, NULL);
image->method_aux_hash = g_hash_table_new (NULL, NULL);
image->vararg_aux_hash = g_hash_table_new (NULL, NULL);
image->handleref = g_hash_table_new (NULL, NULL);
image->handleref_managed = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
image->tokens = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
image->generic_def_objects = mono_g_hash_table_new_type (NULL, NULL, MONO_HASH_VALUE_GC);
image->methodspec = mono_g_hash_table_new_type ((GHashFunc)mono_object_hash, NULL, MONO_HASH_KEY_GC);
image->typespec = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
image->typeref = g_hash_table_new ((GHashFunc)mono_metadata_type_hash, (GCompareFunc)mono_metadata_type_equal);
image->blob_cache = g_hash_table_new ((GHashFunc)mono_blob_entry_hash, (GCompareFunc)mono_blob_entry_equal);
image->gen_params = g_ptr_array_new ();
/*g_print ("string heap create for image %p (%s)\n", image, module_name);*/
string_heap_init (&image->sheap);
mono_image_add_stream_data (&image->us, "", 1);
add_to_blob_cached (image, (char*) "", 1, NULL, 0);
/* import tables... */
mono_image_add_stream_data (&image->code, (char*)entrycode, sizeof (entrycode));
image->iat_offset = mono_image_add_stream_zero (&image->code, 8); /* two IAT entries */
image->idt_offset = mono_image_add_stream_zero (&image->code, 2 * sizeof (MonoIDT)); /* two IDT entries */
image->imp_names_offset = mono_image_add_stream_zero (&image->code, 2); /* flags for name entry */
mono_image_add_stream_data (&image->code, "_CorExeMain", 12);
mono_image_add_stream_data (&image->code, "mscoree.dll", 12);
image->ilt_offset = mono_image_add_stream_zero (&image->code, 8); /* two ILT entries */
stream_data_align (&image->code);
image->cli_header_offset = mono_image_add_stream_zero (&image->code, sizeof (MonoCLIHeader));
for (i=0; i < MONO_TABLE_NUM; ++i) {
image->tables [i].next_idx = 1;
image->tables [i].columns = table_sizes [i];
}
image->image.assembly = (MonoAssembly*)assembly;
image->run = assembly->run;
image->save = assembly->save;
image->pe_kind = 0x1; /* ILOnly */
image->machine = 0x14c; /* I386 */
mono_profiler_module_loaded (&image->image, MONO_PROFILE_OK);
return image;
}
#endif
static void
free_blob_cache_entry (gpointer key, gpointer val, gpointer user_data)
{
g_free (key);
}
void
mono_dynamic_image_free (MonoDynamicImage *image)
{
MonoDynamicImage *di = image;
GList *list;
int i;
if (di->methodspec)
mono_g_hash_table_destroy (di->methodspec);
if (di->typespec)
g_hash_table_destroy (di->typespec);
if (di->typeref)
g_hash_table_destroy (di->typeref);
if (di->handleref)
g_hash_table_destroy (di->handleref);
if (di->handleref_managed)
mono_g_hash_table_destroy (di->handleref_managed);
if (di->tokens)
mono_g_hash_table_destroy (di->tokens);
if (di->generic_def_objects)
mono_g_hash_table_destroy (di->generic_def_objects);
if (di->blob_cache) {
g_hash_table_foreach (di->blob_cache, free_blob_cache_entry, NULL);
g_hash_table_destroy (di->blob_cache);
}
if (di->standalonesig_cache)
g_hash_table_destroy (di->standalonesig_cache);
for (list = di->array_methods; list; list = list->next) {
ArrayMethod *am = (ArrayMethod *)list->data;
g_free (am->sig);
g_free (am->name);
g_free (am);
}
g_list_free (di->array_methods);
if (di->gen_params) {
for (i = 0; i < di->gen_params->len; i++) {
GenericParamTableEntry *entry = g_ptr_array_index (di->gen_params, i);
mono_gc_deregister_root ((char*) &entry->gparam);
g_free (entry);
}
g_ptr_array_free (di->gen_params, TRUE);
}
if (di->token_fixups)
mono_g_hash_table_destroy (di->token_fixups);
if (di->method_to_table_idx)
g_hash_table_destroy (di->method_to_table_idx);
if (di->field_to_table_idx)
g_hash_table_destroy (di->field_to_table_idx);
if (di->method_aux_hash)
g_hash_table_destroy (di->method_aux_hash);
if (di->vararg_aux_hash)
g_hash_table_destroy (di->vararg_aux_hash);
g_free (di->strong_name);
g_free (di->win32_res);
if (di->public_key)
g_free (di->public_key);
/*g_print ("string heap destroy for image %p\n", di);*/
mono_dynamic_stream_reset (&di->sheap);
mono_dynamic_stream_reset (&di->code);
mono_dynamic_stream_reset (&di->resources);
mono_dynamic_stream_reset (&di->us);
mono_dynamic_stream_reset (&di->blob);
mono_dynamic_stream_reset (&di->tstream);
mono_dynamic_stream_reset (&di->guid);
for (i = 0; i < MONO_TABLE_NUM; ++i) {
g_free (di->tables [i].values);
}
}
#ifndef DISABLE_REFLECTION_EMIT
/*
* mono_image_basic_init:
* @assembly: an assembly builder object
*
* Create the MonoImage that represents the assembly builder and set up some
* of the helper hash tables and the basic metadata streams.
*/
void
mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb)
{
MonoDynamicAssembly *assembly;
MonoDynamicImage *image;
MonoDomain *domain = mono_object_domain (assemblyb);
MONO_ARCH_SAVE_REGS;
if (assemblyb->dynamic_assembly)
return;
#if HAVE_BOEHM_GC
/* assembly->assembly.image might be GC allocated */
assembly = assemblyb->dynamic_assembly = GC_MALLOC (sizeof (MonoDynamicAssembly));
#else
assembly = assemblyb->dynamic_assembly = g_new0 (MonoDynamicAssembly, 1);
#endif
mono_profiler_assembly_event (&assembly->assembly, MONO_PROFILE_START_LOAD);
assembly->assembly.ref_count = 1;
assembly->assembly.dynamic = TRUE;
assembly->assembly.corlib_internal = assemblyb->corlib_internal;
assemblyb->assembly.assembly = (MonoAssembly*)assembly;
assembly->assembly.basedir = mono_string_to_utf8 (assemblyb->dir);
if (assemblyb->culture)
assembly->assembly.aname.culture = mono_string_to_utf8 (assemblyb->culture);
else
assembly->assembly.aname.culture = g_strdup ("");
if (assemblyb->version) {
char *vstr = mono_string_to_utf8 (assemblyb->version);
char **version = g_strsplit (vstr, ".", 4);
char **parts = version;
assembly->assembly.aname.major = atoi (*parts++);
assembly->assembly.aname.minor = atoi (*parts++);
assembly->assembly.aname.build = *parts != NULL ? atoi (*parts++) : 0;
assembly->assembly.aname.revision = *parts != NULL ? atoi (*parts) : 0;
g_strfreev (version);
g_free (vstr);
} else {
assembly->assembly.aname.major = 0;
assembly->assembly.aname.minor = 0;
assembly->assembly.aname.build = 0;
assembly->assembly.aname.revision = 0;
}
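/* AssemblyBuilderAccess: Run == 1, Save == 2, RunAndSave == 3 */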
assembly->run = assemblyb->access != 2;
assembly->save = assemblyb->access != 1;
assembly->domain = domain;
image = create_dynamic_mono_image (assembly, mono_string_to_utf8 (assemblyb->name), g_strdup ("RefEmit_YouForgotToDefineAModule"));
image->initial_image = TRUE;
assembly->assembly.aname.name = image->image.name;
assembly->assembly.image = &image->image;
if (assemblyb->pktoken && assemblyb->pktoken->max_length) {
/* -1 to correct for the trailing NULL byte */
if (assemblyb->pktoken->max_length != MONO_PUBLIC_KEY_TOKEN_LENGTH - 1) {
g_error ("Public key token length invalid for assembly %s: %i", assembly->assembly.aname.name, assemblyb->pktoken->max_length);
}
memcpy (&assembly->assembly.aname.public_key_token, mono_array_addr (assemblyb->pktoken, guint8, 0), assemblyb->pktoken->max_length);
}
mono_domain_assemblies_lock (domain);
domain->domain_assemblies = g_slist_prepend (domain->domain_assemblies, assembly);
mono_domain_assemblies_unlock (domain);
register_assembly (mono_object_domain (assemblyb), &assemblyb->assembly, &assembly->assembly);
mono_profiler_assembly_loaded (&assembly->assembly, MONO_PROFILE_OK);
mono_assembly_invoke_load_hook ((MonoAssembly*)assembly);
}
#endif /* !DISABLE_REFLECTION_EMIT */
#ifndef DISABLE_REFLECTION_EMIT_SAVE
static int
calc_section_size (MonoDynamicImage *assembly)
{
int nsections = 0;
/* alignment constraints */
mono_image_add_stream_zero (&assembly->code, 4 - (assembly->code.index % 4));
g_assert ((assembly->code.index % 4) == 0);
assembly->meta_size += 3;
assembly->meta_size &= ~3;
mono_image_add_stream_zero (&assembly->resources, 4 - (assembly->resources.index % 4));
g_assert ((assembly->resources.index % 4) == 0);
assembly->sections [MONO_SECTION_TEXT].size = assembly->meta_size + assembly->code.index + assembly->resources.index + assembly->strong_name_size;
assembly->sections [MONO_SECTION_TEXT].attrs = SECT_FLAGS_HAS_CODE | SECT_FLAGS_MEM_EXECUTE | SECT_FLAGS_MEM_READ;
nsections++;
if (assembly->win32_res) {
guint32 res_size = (assembly->win32_res_size + 3) & ~3;
assembly->sections [MONO_SECTION_RSRC].size = res_size;
assembly->sections [MONO_SECTION_RSRC].attrs = SECT_FLAGS_HAS_INITIALIZED_DATA | SECT_FLAGS_MEM_READ;
nsections++;
}
assembly->sections [MONO_SECTION_RELOC].size = 12;
assembly->sections [MONO_SECTION_RELOC].attrs = SECT_FLAGS_MEM_READ | SECT_FLAGS_MEM_DISCARDABLE | SECT_FLAGS_HAS_INITIALIZED_DATA;
nsections++;
return nsections;
}
typedef struct {
guint32 id;
guint32 offset;
GSList *children;
MonoReflectionWin32Resource *win32_res; /* Only for leaf nodes */
} ResTreeNode;
static int
resource_tree_compare_by_id (gconstpointer a, gconstpointer b)
{
ResTreeNode *t1 = (ResTreeNode*)a;
ResTreeNode *t2 = (ResTreeNode*)b;
return t1->id - t2->id;
}
/*
* resource_tree_create:
*
* Organize the resources into a resource tree.
*/
static ResTreeNode *
resource_tree_create (MonoArray *win32_resources)
{
ResTreeNode *tree, *res_node, *type_node, *lang_node;
GSList *l;
int i;
tree = g_new0 (ResTreeNode, 1);
for (i = 0; i < mono_array_length (win32_resources); ++i) {
MonoReflectionWin32Resource *win32_res =
(MonoReflectionWin32Resource*)mono_array_addr (win32_resources, MonoReflectionWin32Resource, i);
/* Create node */
/* FIXME: BUG: this stores managed references in unmanaged memory */
lang_node = g_new0 (ResTreeNode, 1);
lang_node->id = win32_res->lang_id;
lang_node->win32_res = win32_res;
/* Create type node if necessary */
type_node = NULL;
for (l = tree->children; l; l = l->next)
if (((ResTreeNode*)(l->data))->id == win32_res->res_type) {
type_node = (ResTreeNode*)l->data;
break;
}
if (!type_node) {
type_node = g_new0 (ResTreeNode, 1);
type_node->id = win32_res->res_type;
/*
* The resource types have to be sorted otherwise
* Windows Explorer can't display the version information.
*/
tree->children = g_slist_insert_sorted (tree->children,
type_node, resource_tree_compare_by_id);
}
/* Create res node if necessary */
res_node = NULL;
for (l = type_node->children; l; l = l->next)
if (((ResTreeNode*)(l->data))->id == win32_res->res_id) {
res_node = (ResTreeNode*)l->data;
break;
}
if (!res_node) {
res_node = g_new0 (ResTreeNode, 1);
res_node->id = win32_res->res_id;
type_node->children = g_slist_append (type_node->children, res_node);
}
res_node->children = g_slist_append (res_node->children, lang_node);
}
return tree;
}
/*
* resource_tree_encode:
*
* Encode the resource tree into the format used in the PE file.
*/
static void
resource_tree_encode (ResTreeNode *node, char *begin, char *p, char **endbuf)
{
char *entries;
MonoPEResourceDir dir;
MonoPEResourceDirEntry dir_entry;
MonoPEResourceDataEntry data_entry;
GSList *l;
guint32 res_id_entries;
/*
* For the format of the resource directory, see the article
* "An In-Depth Look into the Win32 Portable Executable File Format" by
* Matt Pietrek
*/
memset (&dir, 0, sizeof (dir));
memset (&dir_entry, 0, sizeof (dir_entry));
memset (&data_entry, 0, sizeof (data_entry));
g_assert (sizeof (dir) == 16);
g_assert (sizeof (dir_entry) == 8);
g_assert (sizeof (data_entry) == 16);
node->offset = p - begin;
/* IMAGE_RESOURCE_DIRECTORY */
res_id_entries = g_slist_length (node->children);
dir.res_id_entries = GUINT16_TO_LE (res_id_entries);
memcpy (p, &dir, sizeof (dir));
p += sizeof (dir);
/* Reserve space for entries */
entries = p;
p += sizeof (dir_entry) * res_id_entries;
/* Write children */
for (l = node->children; l; l = l->next) {
ResTreeNode *child = (ResTreeNode*)l->data;
if (child->win32_res) {
guint32 size;
child->offset = p - begin;
/* IMAGE_RESOURCE_DATA_ENTRY */
data_entry.rde_data_offset = GUINT32_TO_LE (p - begin + sizeof (data_entry));
size = mono_array_length (child->win32_res->res_data);
data_entry.rde_size = GUINT32_TO_LE (size);
memcpy (p, &data_entry, sizeof (data_entry));
p += sizeof (data_entry);
memcpy (p, mono_array_addr (child->win32_res->res_data, char, 0), size);
p += size;
} else {
resource_tree_encode (child, begin, p, &p);
}
}
/* IMAGE_RESOURCE_ENTRY */
for (l = node->children; l; l = l->next) {
ResTreeNode *child = (ResTreeNode*)l->data;
MONO_PE_RES_DIR_ENTRY_SET_NAME (dir_entry, FALSE, child->id);
MONO_PE_RES_DIR_ENTRY_SET_DIR (dir_entry, !child->win32_res, child->offset);
memcpy (entries, &dir_entry, sizeof (dir_entry));
entries += sizeof (dir_entry);
}
*endbuf = p;
}
static void
resource_tree_free (ResTreeNode * node)
{
GSList * list;
for (list = node->children; list; list = list->next)
resource_tree_free ((ResTreeNode*)list->data);
g_slist_free (node->children);
g_free (node);
}
static void
assembly_add_win32_resources (MonoDynamicImage *assembly, MonoReflectionAssemblyBuilder *assemblyb)
{
char *buf;
char *p;
guint32 size, i;
MonoReflectionWin32Resource *win32_res;
ResTreeNode *tree;
if (!assemblyb->win32_resources)
return;
/*
* Resources are stored in a three level tree inside the PE file.
* - level one contains a node for each type of resource
* - level two contains a node for each resource
* - level three contains a node for each instance of a resource for a
* specific language.
*/
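/*
 * For example (illustrative values only), a VERSIONINFO resource plus two
 * string tables in different languages would produce:
 *
 *   root
 *   +- type RT_VERSION (16)
 *   |  +- res id 1
 *   |     +- lang 0x0409 -> data
 *   +- type RT_STRING (6)
 *      +- res id 1
 *         +- lang 0x0409 -> data
 *         +- lang 0x040c -> data
 */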
tree = resource_tree_create (assemblyb->win32_resources);
/* Estimate the size of the encoded tree */
size = 0;
for (i = 0; i < mono_array_length (assemblyb->win32_resources); ++i) {
win32_res = (MonoReflectionWin32Resource*)mono_array_addr (assemblyb->win32_resources, MonoReflectionWin32Resource, i);
size += mono_array_length (win32_res->res_data);
}
/* Directory structure */
size += mono_array_length (assemblyb->win32_resources) * 256;
p = buf = g_malloc (size);
resource_tree_encode (tree, p, p, &p);
g_assert (p - buf <= size);
assembly->win32_res = g_malloc (p - buf);
assembly->win32_res_size = p - buf;
memcpy (assembly->win32_res, buf, p - buf);
g_free (buf);
resource_tree_free (tree);
}
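/*
 * fixup_resource_directory:
 *
 * Recursively walk the encoded resource directory at P and convert the
 * section-relative rde_data_offset of every IMAGE_RESOURCE_DATA_ENTRY leaf
 * into an RVA by adding RVA, the virtual address of the resource section.
 */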
static void
fixup_resource_directory (char *res_section, char *p, guint32 rva)
{
MonoPEResourceDir *dir = (MonoPEResourceDir*)p;
int i;
p += sizeof (MonoPEResourceDir);
for (i = 0; i < GUINT16_FROM_LE (dir->res_named_entries) + GUINT16_FROM_LE (dir->res_id_entries); ++i) {
MonoPEResourceDirEntry *dir_entry = (MonoPEResourceDirEntry*)p;
char *child = res_section + MONO_PE_RES_DIR_ENTRY_DIR_OFFSET (*dir_entry);
if (MONO_PE_RES_DIR_ENTRY_IS_DIR (*dir_entry)) {
fixup_resource_directory (res_section, child, rva);
} else {
MonoPEResourceDataEntry *data_entry = (MonoPEResourceDataEntry*)child;
data_entry->rde_data_offset = GUINT32_TO_LE (GUINT32_FROM_LE (data_entry->rde_data_offset) + rva);
}
p += sizeof (MonoPEResourceDirEntry);
}
}
static void
checked_write_file (HANDLE f, gconstpointer buffer, guint32 numbytes)
{
guint32 dummy;
if (!WriteFile (f, buffer, numbytes, &dummy, NULL))
g_error ("WriteFile returned %d\n", GetLastError ());
}
/*
* mono_image_create_pefile:
* @mb: a module builder object
*
* This function creates the PE-COFF header, the image sections, the CLI header, etc.
* All the data is written into assembly->pefile, where it can be easily retrieved later in chunks.
*/
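/*
 * The resulting file layout, as assembled by the code below, is roughly:
 *
 *   MS-DOS header and stub
 *   PE signature, COFF header and optional header (MonoDotNetHeader)
 *   section table (one MonoSectionTable entry per section)
 *   .text   code stream (import tables, entry stub, CLI header, method
 *           bodies), managed resources, metadata, strong name signature
 *   .rsrc   encoded win32 resources (only if present)
 *   .reloc  a single base relocation for the entry point stub
 */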
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
MonoMSDOSHeader *msdos;
MonoDotNetHeader *header;
MonoSectionTable *section;
MonoCLIHeader *cli_header;
guint32 size, image_size, virtual_base, text_offset;
guint32 header_start, section_start, file_offset, virtual_offset;
MonoDynamicImage *assembly;
MonoReflectionAssemblyBuilder *assemblyb;
MonoDynamicStream pefile_stream = {0};
MonoDynamicStream *pefile = &pefile_stream;
int i, nsections;
guint32 *rva, value;
guchar *p;
static const unsigned char msheader[] = {
0x4d, 0x5a, 0x90, 0x00, 0x03, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0xff, 0xff, 0x00, 0x00,
0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
0x0e, 0x1f, 0xba, 0x0e, 0x00, 0xb4, 0x09, 0xcd, 0x21, 0xb8, 0x01, 0x4c, 0xcd, 0x21, 0x54, 0x68,
0x69, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x20, 0x63, 0x61, 0x6e, 0x6e, 0x6f,
0x74, 0x20, 0x62, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x44, 0x4f, 0x53, 0x20,
0x6d, 0x6f, 0x64, 0x65, 0x2e, 0x0d, 0x0d, 0x0a, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
assemblyb = mb->assemblyb;
mono_image_basic_init (assemblyb);
assembly = mb->dynamic_image;
assembly->pe_kind = assemblyb->pe_kind;
assembly->machine = assemblyb->machine;
((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->pe_kind = assemblyb->pe_kind;
((MonoDynamicImage*)assemblyb->dynamic_assembly->assembly.image)->machine = assemblyb->machine;
mono_image_build_metadata (mb);
if (mb->is_main && assemblyb->resources) {
int len = mono_array_length (assemblyb->resources);
for (i = 0; i < len; ++i)
assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (assemblyb->resources, MonoReflectionResource, i));
}
if (mb->resources) {
int len = mono_array_length (mb->resources);
for (i = 0; i < len; ++i)
assembly_add_resource (mb, assembly, (MonoReflectionResource*)mono_array_addr (mb->resources, MonoReflectionResource, i));
}
build_compressed_metadata (assembly);
if (mb->is_main)
assembly_add_win32_resources (assembly, assemblyb);
nsections = calc_section_size (assembly);
/* The DOS header and stub */
g_assert (sizeof (MonoMSDOSHeader) == sizeof (msheader));
mono_image_add_stream_data (pefile, (char*)msheader, sizeof (msheader));
/* the dotnet header */
header_start = mono_image_add_stream_zero (pefile, sizeof (MonoDotNetHeader));
/* the section tables */
section_start = mono_image_add_stream_zero (pefile, sizeof (MonoSectionTable) * nsections);
file_offset = section_start + sizeof (MonoSectionTable) * nsections;
virtual_offset = VIRT_ALIGN;
image_size = 0;
for (i = 0; i < MONO_SECTION_MAX; ++i) {
if (!assembly->sections [i].size)
continue;
/* align offsets */
file_offset += FILE_ALIGN - 1;
file_offset &= ~(FILE_ALIGN - 1);
virtual_offset += VIRT_ALIGN - 1;
virtual_offset &= ~(VIRT_ALIGN - 1);
assembly->sections [i].offset = file_offset;
assembly->sections [i].rva = virtual_offset;
file_offset += assembly->sections [i].size;
virtual_offset += assembly->sections [i].size;
image_size += (assembly->sections [i].size + VIRT_ALIGN - 1) & ~(VIRT_ALIGN - 1);
}
file_offset += FILE_ALIGN - 1;
file_offset &= ~(FILE_ALIGN - 1);
image_size += section_start + sizeof (MonoSectionTable) * nsections;
/* back-patch info */
msdos = (MonoMSDOSHeader*)pefile->data;
msdos->pe_offset = GUINT32_FROM_LE (sizeof (MonoMSDOSHeader));
header = (MonoDotNetHeader*)(pefile->data + header_start);
header->pesig [0] = 'P';
header->pesig [1] = 'E';
header->coff.coff_machine = GUINT16_FROM_LE (assemblyb->machine);
header->coff.coff_sections = GUINT16_FROM_LE (nsections);
header->coff.coff_time = GUINT32_FROM_LE (time (NULL));
header->coff.coff_opt_header_size = GUINT16_FROM_LE (sizeof (MonoDotNetHeader) - sizeof (MonoCOFFHeader) - 4);
if (assemblyb->pekind == 1) {
/* it's a dll */
header->coff.coff_attributes = GUINT16_FROM_LE (0x210e);
} else {
/* it's an exe */
header->coff.coff_attributes = GUINT16_FROM_LE (0x010e);
}
virtual_base = 0x400000; /* FIXME: 0x10000000 if a DLL */
header->pe.pe_magic = GUINT16_FROM_LE (0x10B);
header->pe.pe_major = 6;
header->pe.pe_minor = 0;
size = assembly->sections [MONO_SECTION_TEXT].size;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->pe.pe_code_size = GUINT32_FROM_LE(size);
size = assembly->sections [MONO_SECTION_RSRC].size;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->pe.pe_data_size = GUINT32_FROM_LE(size);
g_assert (START_TEXT_RVA == assembly->sections [MONO_SECTION_TEXT].rva);
header->pe.pe_rva_code_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);
header->pe.pe_rva_data_base = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);
/* pe_rva_entry_point always at the beginning of the text section */
header->pe.pe_rva_entry_point = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_TEXT].rva);
header->nt.pe_image_base = GUINT32_FROM_LE (virtual_base);
header->nt.pe_section_align = GUINT32_FROM_LE (VIRT_ALIGN);
header->nt.pe_file_alignment = GUINT32_FROM_LE (FILE_ALIGN);
header->nt.pe_os_major = GUINT16_FROM_LE (4);
header->nt.pe_os_minor = GUINT16_FROM_LE (0);
header->nt.pe_subsys_major = GUINT16_FROM_LE (4);
size = section_start;
size += FILE_ALIGN - 1;
size &= ~(FILE_ALIGN - 1);
header->nt.pe_header_size = GUINT32_FROM_LE (size);
size = image_size;
size += VIRT_ALIGN - 1;
size &= ~(VIRT_ALIGN - 1);
header->nt.pe_image_size = GUINT32_FROM_LE (size);
/*
// Translate the PEFileKind value to the value expected by the Windows loader
*/
{
short kind;
/*
// PEFileKinds.Dll == 1
// PEFileKinds.ConsoleApplication == 2
// PEFileKinds.WindowApplication == 3
//
// need to get:
// IMAGE_SUBSYSTEM_WINDOWS_GUI 2 // Image runs in the Windows GUI subsystem.
// IMAGE_SUBSYSTEM_WINDOWS_CUI 3 // Image runs in the Windows character subsystem.
*/
if (assemblyb->pekind == 3)
kind = 2;
else
kind = 3;
header->nt.pe_subsys_required = GUINT16_FROM_LE (kind);
}
header->nt.pe_stack_reserve = GUINT32_FROM_LE (0x00100000);
header->nt.pe_stack_commit = GUINT32_FROM_LE (0x00001000);
header->nt.pe_heap_reserve = GUINT32_FROM_LE (0x00100000);
header->nt.pe_heap_commit = GUINT32_FROM_LE (0x00001000);
header->nt.pe_loader_flags = GUINT32_FROM_LE (0);
header->nt.pe_data_dir_count = GUINT32_FROM_LE (16);
/* fill data directory entries */
header->datadir.pe_resource_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].size);
header->datadir.pe_resource_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RSRC].rva);
header->datadir.pe_reloc_table.size = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].size);
header->datadir.pe_reloc_table.rva = GUINT32_FROM_LE (assembly->sections [MONO_SECTION_RELOC].rva);
header->datadir.pe_cli_header.size = GUINT32_FROM_LE (72);
header->datadir.pe_cli_header.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->cli_header_offset);
header->datadir.pe_iat.size = GUINT32_FROM_LE (8);
header->datadir.pe_iat.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
/* patch entrypoint name */
if (assemblyb->pekind == 1)
memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorDllMain", 12);
else
memcpy (assembly->code.data + assembly->imp_names_offset + 2, "_CorExeMain", 12);
/* patch imported function RVA name */
rva = (guint32*)(assembly->code.data + assembly->iat_offset);
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset);
/* the import table */
header->datadir.pe_import_table.size = GUINT32_FROM_LE (79); /* FIXME: magic number? */
header->datadir.pe_import_table.rva = GUINT32_FROM_LE (assembly->text_rva + assembly->idt_offset);
/* patch imported dll RVA name and other entries in the dir */
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, name_rva));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->imp_names_offset + 14); /* 14 is hint+strlen+1 of func name */
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_address_table_rva));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->iat_offset);
rva = (guint32*)(assembly->code.data + assembly->idt_offset + G_STRUCT_OFFSET (MonoIDT, import_lookup_table));
*rva = GUINT32_FROM_LE (assembly->text_rva + assembly->ilt_offset);
p = (guchar*)(assembly->code.data + assembly->ilt_offset);
value = (assembly->text_rva + assembly->imp_names_offset);
*p++ = (value) & 0xff;
*p++ = (value >> 8) & (0xff);
*p++ = (value >> 16) & (0xff);
*p++ = (value >> 24) & (0xff);
/* the CLI header info */
cli_header = (MonoCLIHeader*)(assembly->code.data + assembly->cli_header_offset);
cli_header->ch_size = GUINT32_FROM_LE (72);
cli_header->ch_runtime_major = GUINT16_FROM_LE (2);
cli_header->ch_runtime_minor = GUINT16_FROM_LE (5);
cli_header->ch_flags = GUINT32_FROM_LE (assemblyb->pe_kind);
if (assemblyb->entry_point) {
guint32 table_idx = 0;
if (!strcmp (assemblyb->entry_point->object.vtable->klass->name, "MethodBuilder")) {
MonoReflectionMethodBuilder *methodb = (MonoReflectionMethodBuilder*)assemblyb->entry_point;
table_idx = methodb->table_idx;
} else {
table_idx = GPOINTER_TO_UINT (g_hash_table_lookup (assembly->method_to_table_idx, assemblyb->entry_point->method));
}
cli_header->ch_entry_point = GUINT32_FROM_LE (table_idx | MONO_TOKEN_METHOD_DEF);
} else {
cli_header->ch_entry_point = GUINT32_FROM_LE (0);
}
/* The embedded managed resources */
text_offset = assembly->text_rva + assembly->code.index;
cli_header->ch_resources.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_resources.size = GUINT32_FROM_LE (assembly->resources.index);
text_offset += assembly->resources.index;
cli_header->ch_metadata.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_metadata.size = GUINT32_FROM_LE (assembly->meta_size);
text_offset += assembly->meta_size;
if (assembly->strong_name_size) {
cli_header->ch_strong_name.rva = GUINT32_FROM_LE (text_offset);
cli_header->ch_strong_name.size = GUINT32_FROM_LE (assembly->strong_name_size);
text_offset += assembly->strong_name_size;
}
/* write the section tables and section content */
section = (MonoSectionTable*)(pefile->data + section_start);
for (i = 0; i < MONO_SECTION_MAX; ++i) {
static const char section_names [][7] = {
".text", ".rsrc", ".reloc"
};
if (!assembly->sections [i].size)
continue;
strcpy (section->st_name, section_names [i]);
/*g_print ("output section %s (%d), size: %d\n", section->st_name, i, assembly->sections [i].size);*/
section->st_virtual_address = GUINT32_FROM_LE (assembly->sections [i].rva);
section->st_virtual_size = GUINT32_FROM_LE (assembly->sections [i].size);
section->st_raw_data_size = GUINT32_FROM_LE (GUINT32_TO_LE (section->st_virtual_size) + (FILE_ALIGN - 1));
section->st_raw_data_size &= GUINT32_FROM_LE (~(FILE_ALIGN - 1));
section->st_raw_data_ptr = GUINT32_FROM_LE (assembly->sections [i].offset);
section->st_flags = GUINT32_FROM_LE (assembly->sections [i].attrs);
section ++;
}
checked_write_file (file, pefile->data, pefile->index);
mono_dynamic_stream_reset (pefile);
for (i = 0; i < MONO_SECTION_MAX; ++i) {
if (!assembly->sections [i].size)
continue;
if (SetFilePointer (file, assembly->sections [i].offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
g_error ("SetFilePointer returned %d\n", GetLastError ());
switch (i) {
case MONO_SECTION_TEXT:
/* patch entry point: the entry stub at the start of .text jumps
 * indirectly through the IAT; bytes 2-5 of the stub hold the absolute
 * address of the IAT slot, which is the address the .reloc entry
 * fixes up at load time */
p = (guchar*)(assembly->code.data + 2);
value = (virtual_base + assembly->text_rva + assembly->iat_offset);
*p++ = (value) & 0xff;
*p++ = (value >> 8) & 0xff;
*p++ = (value >> 16) & 0xff;
*p++ = (value >> 24) & 0xff;
checked_write_file (file, assembly->code.data, assembly->code.index);
checked_write_file (file, assembly->resources.data, assembly->resources.index);
checked_write_file (file, assembly->image.raw_metadata, assembly->meta_size);
checked_write_file (file, assembly->strong_name, assembly->strong_name_size);
g_free (assembly->image.raw_metadata);
break;
case MONO_SECTION_RELOC: {
struct {
guint32 page_rva;
guint32 block_size;
guint16 type_and_offset;
guint16 term;
} reloc;
g_assert (sizeof (reloc) == 12);
reloc.page_rva = GUINT32_FROM_LE (assembly->text_rva);
reloc.block_size = GUINT32_FROM_LE (12);
/*
 * The entrypoint stub is always at the start of the text section.
 * 3 is IMAGE_REL_BASED_HIGHLOW; 2 is the offset of the address to
 * patch within the page (patch_rva - text_rva).
 */
reloc.type_and_offset = GUINT16_FROM_LE ((3 << 12) + (2));
reloc.term = 0;
checked_write_file (file, &reloc, sizeof (reloc));
break;
}
case MONO_SECTION_RSRC:
if (assembly->win32_res) {
/* Fixup the offsets in the IMAGE_RESOURCE_DATA_ENTRY structures */
fixup_resource_directory (assembly->win32_res, assembly->win32_res, assembly->sections [i].rva);
checked_write_file (file, assembly->win32_res, assembly->win32_res_size);
}
break;
default:
g_assert_not_reached ();
}
}
/* check that the file is properly padded */
if (SetFilePointer (file, file_offset, NULL, FILE_BEGIN) == INVALID_SET_FILE_POINTER)
g_error ("SetFilePointer returned %d\n", GetLastError ());
if (! SetEndOfFile (file))
g_error ("SetEndOfFile returned %d\n", GetLastError ());
mono_dynamic_stream_reset (&assembly->code);
mono_dynamic_stream_reset (&assembly->us);
mono_dynamic_stream_reset (&assembly->blob);
mono_dynamic_stream_reset (&assembly->guid);
mono_dynamic_stream_reset (&assembly->sheap);
g_hash_table_foreach (assembly->blob_cache, (GHFunc)g_free, NULL);
g_hash_table_destroy (assembly->blob_cache);
assembly->blob_cache = NULL;
}
#else /* DISABLE_REFLECTION_EMIT_SAVE */
void
mono_image_create_pefile (MonoReflectionModuleBuilder *mb, HANDLE file)
{
g_assert_not_reached ();
}
#endif /* DISABLE_REFLECTION_EMIT_SAVE */
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
char *name;
MonoImage *image;
MonoImageOpenStatus status;
MonoDynamicAssembly *assembly;
guint32 module_count;
MonoImage **new_modules;
gboolean *new_modules_loaded;
name = mono_string_to_utf8 (fileName);
image = mono_image_open (name, &status);
if (!image) {
MonoException *exc;
if (status == MONO_IMAGE_ERROR_ERRNO)
exc = mono_get_exception_file_not_found (fileName);
else
exc = mono_get_exception_bad_image_format (name);
g_free (name);
mono_raise_exception (exc);
}
g_free (name);
assembly = ab->dynamic_assembly;
image->assembly = (MonoAssembly*)assembly;
module_count = image->assembly->image->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
new_modules_loaded = g_new0 (gboolean, module_count + 1);
if (image->assembly->image->modules)
memcpy (new_modules, image->assembly->image->modules, module_count * sizeof (MonoImage *));
if (image->assembly->image->modules_loaded)
memcpy (new_modules_loaded, image->assembly->image->modules_loaded, module_count * sizeof (gboolean));
new_modules [module_count] = image;
new_modules_loaded [module_count] = TRUE;
mono_image_addref (image);
g_free (image->assembly->image->modules);
image->assembly->image->modules = new_modules;
image->assembly->image->modules_loaded = new_modules_loaded;
image->assembly->image->module_count ++;
mono_assembly_load_references (image, &status);
if (status) {
mono_image_close (image);
mono_raise_exception (mono_get_exception_file_not_found (fileName));
}
return mono_module_get_object (mono_domain_get (), image);
}
#endif /* DISABLE_REFLECTION_EMIT */
/*
 * We always need to return the same object for MethodInfo, FieldInfo etc.,
 * but we need to consider the reflected type.
 * Type uses a different hash, since it uses custom hash/equal functions.
 */
typedef struct {
gpointer item;
MonoClass *refclass;
} ReflectedEntry;
static gboolean
reflected_equal (gconstpointer a, gconstpointer b) {
const ReflectedEntry *ea = a;
const ReflectedEntry *eb = b;
return (ea->item == eb->item) && (ea->refclass == eb->refclass);
}
static guint
reflected_hash (gconstpointer a) {
const ReflectedEntry *ea = a;
return mono_aligned_addr_hash (ea->item);
}
#define CHECK_OBJECT(t,p,k) \
do { \
t _obj; \
ReflectedEntry e; \
e.item = (p); \
e.refclass = (k); \
mono_domain_lock (domain); \
if (!domain->refobject_hash) \
domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \
if ((_obj = mono_g_hash_table_lookup (domain->refobject_hash, &e))) { \
mono_domain_unlock (domain); \
return _obj; \
} \
mono_domain_unlock (domain); \
} while (0)
#ifdef HAVE_BOEHM_GC
/* ReflectedEntry doesn't need to be GC tracked */
#define ALLOC_REFENTRY g_new0 (ReflectedEntry, 1)
#define FREE_REFENTRY(entry) g_free ((entry))
#define REFENTRY_REQUIRES_CLEANUP
#else
#define ALLOC_REFENTRY mono_mempool_alloc (domain->mp, sizeof (ReflectedEntry))
/* FIXME: */
#define FREE_REFENTRY(entry)
#endif
#define CACHE_OBJECT(t,p,o,k) \
do { \
t _obj; \
ReflectedEntry pe; \
pe.item = (p); \
pe.refclass = (k); \
mono_domain_lock (domain); \
if (!domain->refobject_hash) \
domain->refobject_hash = mono_g_hash_table_new_type (reflected_hash, reflected_equal, MONO_HASH_VALUE_GC); \
_obj = mono_g_hash_table_lookup (domain->refobject_hash, &pe); \
if (!_obj) { \
ReflectedEntry *e = ALLOC_REFENTRY; \
e->item = (p); \
e->refclass = (k); \
mono_g_hash_table_insert (domain->refobject_hash, e,o); \
_obj = o; \
} \
mono_domain_unlock (domain); \
return _obj; \
} while (0)
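/*
 * A minimal sketch (not compiled) of how the two macros pair up in the
 * accessors below. MonoFoo, MonoReflectionFoo, foo_klass and
 * mono_foo_get_object are hypothetical names; the real users are
 * mono_assembly_get_object, mono_field_get_object and friends.
 */
#if 0
static MonoReflectionFoo*
mono_foo_get_object (MonoDomain *domain, MonoFoo *foo, MonoClass *refclass)
{
MonoReflectionFoo *res;
/* returns the cached wrapper, if one exists, exiting the function */
CHECK_OBJECT (MonoReflectionFoo *, foo, refclass);
res = (MonoReflectionFoo *)mono_object_new (domain, foo_klass);
res->foo = foo;
/* inserts res into the cache (unless another thread raced us) and returns whichever object ended up cached */
CACHE_OBJECT (MonoReflectionFoo *, foo, res, refclass);
}
#endif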
static void
clear_cached_object (MonoDomain *domain, gpointer o, MonoClass *klass)
{
mono_domain_lock (domain);
if (domain->refobject_hash) {
ReflectedEntry pe;
gpointer orig_pe, orig_value;
pe.item = o;
pe.refclass = klass;
if (mono_g_hash_table_lookup_extended (domain->refobject_hash, &pe, &orig_pe, &orig_value)) {
mono_g_hash_table_remove (domain->refobject_hash, &pe);
FREE_REFENTRY (orig_pe);
}
}
mono_domain_unlock (domain);
}
#ifdef REFENTRY_REQUIRES_CLEANUP
static void
cleanup_refobject_hash (gpointer key, gpointer value, gpointer user_data)
{
FREE_REFENTRY (key);
}
#endif
void
mono_reflection_cleanup_domain (MonoDomain *domain)
{
if (domain->refobject_hash) {
/* let's avoid scanning the whole hashtable if it's not needed */
#ifdef REFENTRY_REQUIRES_CLEANUP
mono_g_hash_table_foreach (domain->refobject_hash, cleanup_refobject_hash, NULL);
#endif
mono_g_hash_table_destroy (domain->refobject_hash);
domain->refobject_hash = NULL;
}
}
#ifndef DISABLE_REFLECTION_EMIT
static gpointer
register_assembly (MonoDomain *domain, MonoReflectionAssembly *res, MonoAssembly *assembly)
{
CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL);
}
static gpointer
register_module (MonoDomain *domain, MonoReflectionModuleBuilder *res, MonoDynamicImage *module)
{
CACHE_OBJECT (MonoReflectionModuleBuilder *, module, res, NULL);
}
void
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
MonoDynamicImage *image = moduleb->dynamic_image;
MonoReflectionAssemblyBuilder *ab = moduleb->assemblyb;
if (!image) {
MonoError error;
int module_count;
MonoImage **new_modules;
MonoImage *ass;
char *name, *fqname;
/*
* FIXME: we already created an image in mono_image_basic_init (), but
* we don't know which module it belongs to, since that is only
* determined at assembly save time.
*/
/*image = (MonoDynamicImage*)ab->dynamic_assembly->assembly.image; */
name = mono_string_to_utf8 (ab->name);
fqname = mono_string_to_utf8_checked (moduleb->module.fqname, &error);
if (!mono_error_ok (&error)) {
g_free (name);
mono_error_raise_exception (&error);
}
image = create_dynamic_mono_image (ab->dynamic_assembly, name, fqname);
moduleb->module.image = &image->image;
moduleb->dynamic_image = image;
register_module (mono_object_domain (moduleb), moduleb, image);
/* register the module with the assembly */
ass = ab->dynamic_assembly->assembly.image;
module_count = ass->module_count;
new_modules = g_new0 (MonoImage *, module_count + 1);
if (ass->modules)
memcpy (new_modules, ass->modules, module_count * sizeof (MonoImage *));
new_modules [module_count] = &image->image;
mono_image_addref (&image->image);
g_free (ass->modules);
ass->modules = new_modules;
ass->module_count ++;
}
}
void
mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type)
{
MonoDynamicImage *image = moduleb->dynamic_image;
g_assert (type->type);
image->wrappers_type = mono_class_from_mono_type (type->type);
}
#endif
/*
* mono_assembly_get_object:
* @domain: an app domain
* @assembly: an assembly
*
* Return a System.Reflection.Assembly object representing the MonoAssembly @assembly.
*/
MonoReflectionAssembly*
mono_assembly_get_object (MonoDomain *domain, MonoAssembly *assembly)
{
static MonoClass *assembly_type;
MonoReflectionAssembly *res;
CHECK_OBJECT (MonoReflectionAssembly *, assembly, NULL);
if (!assembly_type) {
MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoAssembly");
if (class == NULL)
class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Assembly");
g_assert (class);
assembly_type = class;
}
res = (MonoReflectionAssembly *)mono_object_new (domain, assembly_type);
res->assembly = assembly;
CACHE_OBJECT (MonoReflectionAssembly *, assembly, res, NULL);
}
MonoReflectionModule*
mono_module_get_object (MonoDomain *domain, MonoImage *image)
{
static MonoClass *module_type;
MonoReflectionModule *res;
char* basename;
CHECK_OBJECT (MonoReflectionModule *, image, NULL);
if (!module_type) {
MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoModule");
if (class == NULL)
class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Module");
g_assert (class);
module_type = class;
}
res = (MonoReflectionModule *)mono_object_new (domain, module_type);
res->image = image;
MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object (domain, image->assembly));
MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, image->name));
basename = g_path_get_basename (image->name);
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, basename));
MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, image->module_name));
g_free (basename);
if (image->assembly->image == image) {
res->token = mono_metadata_make_token (MONO_TABLE_MODULE, 1);
} else {
int i;
res->token = 0;
if (image->assembly->image->modules) {
for (i = 0; i < image->assembly->image->module_count; i++) {
if (image->assembly->image->modules [i] == image)
res->token = mono_metadata_make_token (MONO_TABLE_MODULEREF, i + 1);
}
g_assert (res->token);
}
}
CACHE_OBJECT (MonoReflectionModule *, image, res, NULL);
}
MonoReflectionModule*
mono_module_file_get_object (MonoDomain *domain, MonoImage *image, int table_index)
{
static MonoClass *module_type;
MonoReflectionModule *res;
MonoTableInfo *table;
guint32 cols [MONO_FILE_SIZE];
const char *name;
guint32 i, name_idx;
const char *val;
if (!module_type) {
MonoClass *class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoModule");
if (class == NULL)
class = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Module");
g_assert (class);
module_type = class;
}
res = (MonoReflectionModule *)mono_object_new (domain, module_type);
table = &image->tables [MONO_TABLE_FILE];
g_assert (table_index < table->rows);
mono_metadata_decode_row (table, table_index, cols, MONO_FILE_SIZE);
res->image = NULL;
MONO_OBJECT_SETREF (res, assembly, (MonoReflectionAssembly *) mono_assembly_get_object (domain, image->assembly));
name = mono_metadata_string_heap (image, cols [MONO_FILE_NAME]);
/* Check whether the row has a corresponding row in the moduleref table */
table = &image->tables [MONO_TABLE_MODULEREF];
for (i = 0; i < table->rows; ++i) {
name_idx = mono_metadata_decode_row_col (table, i, MONO_MODULEREF_NAME);
val = mono_metadata_string_heap (image, name_idx);
if (strcmp (val, name) == 0)
res->image = image->modules [i];
}
MONO_OBJECT_SETREF (res, fqname, mono_string_new (domain, name));
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, name));
MONO_OBJECT_SETREF (res, scopename, mono_string_new (domain, name));
res->is_resource = cols [MONO_FILE_FLAGS] & FILE_CONTAINS_NO_METADATA;
res->token = mono_metadata_make_token (MONO_TABLE_FILE, table_index + 1);
return res;
}
static gboolean
verify_safe_for_managed_space (MonoType *type)
{
switch (type->type) {
#ifdef DEBUG_HARDER
case MONO_TYPE_ARRAY:
return verify_safe_for_managed_space (&type->data.array->eklass->byval_arg);
case MONO_TYPE_PTR:
return verify_safe_for_managed_space (type->data.type);
case MONO_TYPE_SZARRAY:
return verify_safe_for_managed_space (&type->data.klass->byval_arg);
case MONO_TYPE_GENERICINST: {
MonoGenericInst *inst = type->data.generic_class->inst;
int i;
if (!inst->is_open)
break;
for (i = 0; i < inst->type_argc; ++i)
if (!verify_safe_for_managed_space (inst->type_argv [i]))
return FALSE;
break;
}
#endif
case MONO_TYPE_VAR:
case MONO_TYPE_MVAR:
return TRUE;
}
return TRUE;
}
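/*
 * mono_type_normalize:
 *
 * If TYPE is a generic instance of a GTD applied to the GTD's own type
 * parameters (a "denormalized" GTD, e.g. Foo<A,B> instantiated as Foo<A,B>),
 * return the GTD itself. Otherwise recursively normalize the type arguments
 * and rebind the instance if any of them changed. Types that need no
 * normalization are returned unchanged.
 */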
static MonoType*
mono_type_normalize (MonoType *type)
{
int i;
MonoGenericClass *gclass;
MonoGenericInst *ginst;
MonoClass *gtd;
MonoGenericContainer *gcontainer;
MonoType **argv = NULL;
gboolean is_denorm_gtd = TRUE, requires_rebind = FALSE;
if (type->type != MONO_TYPE_GENERICINST)
return type;
gclass = type->data.generic_class;
ginst = gclass->context.class_inst;
if (!ginst->is_open)
return type;
gtd = gclass->container_class;
gcontainer = gtd->generic_container;
argv = g_newa (MonoType*, ginst->type_argc);
for (i = 0; i < ginst->type_argc; ++i) {
MonoType *t = ginst->type_argv [i], *norm;
if (t->type != MONO_TYPE_VAR || t->data.generic_param->num != i || t->data.generic_param->owner != gcontainer)
is_denorm_gtd = FALSE;
norm = mono_type_normalize (t);
argv [i] = norm;
if (norm != t)
requires_rebind = TRUE;
}
if (is_denorm_gtd)
return type->byref == gtd->byval_arg.byref ? &gtd->byval_arg : &gtd->this_arg;
if (requires_rebind) {
MonoClass *klass = mono_class_bind_generic_parameters (gtd, ginst->type_argc, argv, gclass->is_dynamic);
return type->byref == klass->byval_arg.byref ? &klass->byval_arg : &klass->this_arg;
}
return type;
}
/*
* mono_type_get_object:
* @domain: an app domain
* @type: a type
*
* Return a System.MonoType object representing the type @type.
*/
MonoReflectionType*
mono_type_get_object (MonoDomain *domain, MonoType *type)
{
MonoType *norm_type;
MonoReflectionType *res;
MonoClass *klass = mono_class_from_mono_type (type);
/* We must avoid using @type as it might have come
 * from a mono_metadata_type_dup and the caller
 * expects that it can be freed.
 * Use the canonical type from the class instead.
 */
type = klass->byval_arg.byref == type->byref ? &klass->byval_arg : &klass->this_arg;
/* void is very common */
if (type->type == MONO_TYPE_VOID && domain->typeof_void)
return (MonoReflectionType*)domain->typeof_void;
/*
* If the vtable of the given class was already created, we can use
* the MonoType from there and avoid all locking and hash table lookups.
*
* We cannot do this for TypeBuilders as mono_reflection_create_runtime_class expects
* that the resulting object is different.
*/
if (type == &klass->byval_arg && !klass->image->dynamic) {
MonoVTable *vtable = mono_class_try_get_vtable (domain, klass);
if (vtable && vtable->type)
return vtable->type;
}
mono_loader_lock (); /*FIXME mono_class_init and mono_class_vtable acquire it*/
mono_domain_lock (domain);
if (!domain->type_hash)
domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mono_metadata_type_hash,
(GCompareFunc)mono_metadata_type_equal, MONO_HASH_VALUE_GC);
if ((res = mono_g_hash_table_lookup (domain->type_hash, type))) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
/* Types must be normalized so a generic instance of the GTD gets the same inner type.
 * For example, in: Foo<A,B>; Bar<A> : Foo<A, Bar<A>>
 * the second Bar will be encoded as a generic instance of Bar with <A> as parameter.
 * Everywhere else, Bar<A> will be encoded as the GTD itself. This is an implementation
 * artifact of how generics are encoded and should be transparent to managed code, so we
 * need to weed out this difference when retrieving managed System.Type objects.
 */
norm_type = mono_type_normalize (type);
if (norm_type != type) {
res = mono_type_get_object (domain, norm_type);
mono_g_hash_table_insert (domain->type_hash, type, res);
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
/* This MonoGenericClass hack is no longer necessary. Let's leave it here until we finish with the 2-stage type-builder setup.*/
if ((type->type == MONO_TYPE_GENERICINST) && type->data.generic_class->is_dynamic && !type->data.generic_class->container_class->wastypebuilder)
g_assert (0);
if (!verify_safe_for_managed_space (type)) {
mono_domain_unlock (domain);
mono_loader_unlock ();
mono_raise_exception (mono_get_exception_invalid_operation ("This type cannot be propagated to managed space"));
}
if (mono_class_get_ref_info (klass) && !klass->wastypebuilder) {
gboolean is_type_done = TRUE;
/* Generic parameters have reflection_info set but they are not finished together with their enclosing type.
* We must ensure that once a type is finished we don't return a GenericTypeParameterBuilder.
* We can't simply close the types as this will interfere with other parts of the generics machinery.
*/
if (klass->byval_arg.type == MONO_TYPE_MVAR || klass->byval_arg.type == MONO_TYPE_VAR) {
MonoGenericParam *gparam = klass->byval_arg.data.generic_param;
if (gparam->owner && gparam->owner->is_method) {
MonoMethod *method = gparam->owner->owner.method;
if (method && mono_class_get_generic_type_definition (method->klass)->wastypebuilder)
is_type_done = FALSE;
} else if (gparam->owner && !gparam->owner->is_method) {
MonoClass *klass = gparam->owner->owner.klass;
if (klass && mono_class_get_generic_type_definition (klass)->wastypebuilder)
is_type_done = FALSE;
}
}
/* g_assert_not_reached (); */
/* should this be considered an error condition? */
if (is_type_done && !type->byref) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return mono_class_get_ref_info (klass);
}
}
/* This is stored in vtables/JITted code so it has to be pinned */
res = (MonoReflectionType *)mono_object_new_pinned (domain, mono_defaults.monotype_class);
res->type = type;
mono_g_hash_table_insert (domain->type_hash, type, res);
if (type->type == MONO_TYPE_VOID)
domain->typeof_void = (MonoObject*)res;
mono_domain_unlock (domain);
mono_loader_unlock ();
return res;
}
/*
* mono_method_get_object:
* @domain: an app domain
* @method: a method
* @refclass: the reflected type (can be NULL)
*
* Return a System.Reflection.MonoMethod object representing the method @method.
*/
MonoReflectionMethod*
mono_method_get_object (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
/*
* We use the same C representation for methods and constructors, but the type
* name in C# is different.
*/
static MonoClass *System_Reflection_MonoMethod = NULL;
static MonoClass *System_Reflection_MonoCMethod = NULL;
static MonoClass *System_Reflection_MonoGenericMethod = NULL;
static MonoClass *System_Reflection_MonoGenericCMethod = NULL;
MonoClass *klass;
MonoReflectionMethod *ret;
if (method->is_inflated) {
MonoReflectionGenericMethod *gret;
refclass = method->klass;
CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
if ((*method->name == '.') && (!strcmp (method->name, ".ctor") || !strcmp (method->name, ".cctor"))) {
if (!System_Reflection_MonoGenericCMethod)
System_Reflection_MonoGenericCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericCMethod");
klass = System_Reflection_MonoGenericCMethod;
} else {
if (!System_Reflection_MonoGenericMethod)
System_Reflection_MonoGenericMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoGenericMethod");
klass = System_Reflection_MonoGenericMethod;
}
gret = (MonoReflectionGenericMethod*)mono_object_new (domain, klass);
gret->method.method = method;
MONO_OBJECT_SETREF (gret, method.name, mono_string_new (domain, method->name));
MONO_OBJECT_SETREF (gret, method.reftype, mono_type_get_object (domain, &refclass->byval_arg));
CACHE_OBJECT (MonoReflectionMethod *, method, (MonoReflectionMethod*)gret, refclass);
}
if (!refclass)
refclass = method->klass;
CHECK_OBJECT (MonoReflectionMethod *, method, refclass);
if (*method->name == '.' && (strcmp (method->name, ".ctor") == 0 || strcmp (method->name, ".cctor") == 0)) {
if (!System_Reflection_MonoCMethod)
System_Reflection_MonoCMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoCMethod");
klass = System_Reflection_MonoCMethod;
}
else {
if (!System_Reflection_MonoMethod)
System_Reflection_MonoMethod = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoMethod");
klass = System_Reflection_MonoMethod;
}
ret = (MonoReflectionMethod*)mono_object_new (domain, klass);
ret->method = method;
MONO_OBJECT_SETREF (ret, reftype, mono_type_get_object (domain, &refclass->byval_arg));
CACHE_OBJECT (MonoReflectionMethod *, method, ret, refclass);
}
/*
* mono_method_clear_object:
*
* Clear the cached reflection objects for the dynamic method METHOD.
*/
void
mono_method_clear_object (MonoDomain *domain, MonoMethod *method)
{
MonoClass *klass;
g_assert (method->dynamic);
klass = method->klass;
while (klass) {
clear_cached_object (domain, method, klass);
klass = klass->parent;
}
/* Added by mono_param_get_objects () */
clear_cached_object (domain, &(method->signature), NULL);
klass = method->klass;
while (klass) {
clear_cached_object (domain, &(method->signature), klass);
klass = klass->parent;
}
}
/*
* mono_field_get_object:
* @domain: an app domain
* @klass: a type
* @field: a field
*
* Return a System.Reflection.MonoField object representing the field @field
* in class @klass.
*/
MonoReflectionField*
mono_field_get_object (MonoDomain *domain, MonoClass *klass, MonoClassField *field)
{
MonoReflectionField *res;
static MonoClass *monofield_klass;
CHECK_OBJECT (MonoReflectionField *, field, klass);
if (!monofield_klass)
monofield_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoField");
res = (MonoReflectionField *)mono_object_new (domain, monofield_klass);
res->klass = klass;
res->field = field;
MONO_OBJECT_SETREF (res, name, mono_string_new (domain, mono_field_get_name (field)));
if (is_field_on_inst (field)) {
res->attrs = get_field_on_inst_generic_type (field)->attrs;
MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type));
} else {
if (field->type)
MONO_OBJECT_SETREF (res, type, mono_type_get_object (domain, field->type));
res->attrs = mono_field_get_flags (field);
}
CACHE_OBJECT (MonoReflectionField *, field, res, klass);
}
/*
* mono_property_get_object:
* @domain: an app domain
* @klass: a type
* @property: a property
*
* Return a System.Reflection.MonoProperty object representing the property @property
* in class @klass.
*/
MonoReflectionProperty*
mono_property_get_object (MonoDomain *domain, MonoClass *klass, MonoProperty *property)
{
MonoReflectionProperty *res;
static MonoClass *monoproperty_klass;
CHECK_OBJECT (MonoReflectionProperty *, property, klass);
if (!monoproperty_klass)
monoproperty_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoProperty");
res = (MonoReflectionProperty *)mono_object_new (domain, monoproperty_klass);
res->klass = klass;
res->property = property;
CACHE_OBJECT (MonoReflectionProperty *, property, res, klass);
}
/*
* mono_event_get_object:
* @domain: an app domain
* @klass: a type
* @event: an event
*
* Return a System.Reflection.MonoEvent object representing the event @event
* in class @klass.
*/
MonoReflectionEvent*
mono_event_get_object (MonoDomain *domain, MonoClass *klass, MonoEvent *event)
{
MonoReflectionEvent *res;
MonoReflectionMonoEvent *mono_event;
static MonoClass *monoevent_klass;
CHECK_OBJECT (MonoReflectionEvent *, event, klass);
if (!monoevent_klass)
monoevent_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MonoEvent");
mono_event = (MonoReflectionMonoEvent *)mono_object_new (domain, monoevent_klass);
mono_event->klass = klass;
mono_event->event = event;
res = (MonoReflectionEvent*)mono_event;
CACHE_OBJECT (MonoReflectionEvent *, event, res, klass);
}
/**
* mono_get_reflection_missing_object:
* @domain: Domain where the object lives
*
* Returns the System.Reflection.Missing.Value singleton object
* (of type System.Reflection.Missing).
*
* Used as the value for ParameterInfo.DefaultValue when Optional
* is present
*/
static MonoObject *
mono_get_reflection_missing_object (MonoDomain *domain)
{
MonoObject *obj;
static MonoClassField *missing_value_field = NULL;
if (!missing_value_field) {
MonoClass *missing_klass;
missing_klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "Missing");
mono_class_init (missing_klass);
missing_value_field = mono_class_get_field_from_name (missing_klass, "Value");
g_assert (missing_value_field);
}
obj = mono_field_get_value_object (domain, missing_value_field, NULL);
g_assert (obj);
return obj;
}
static MonoObject*
get_dbnull (MonoDomain *domain, MonoObject **dbnull)
{
if (!*dbnull)
*dbnull = mono_get_dbnull_object (domain);
return *dbnull;
}
static MonoObject*
get_reflection_missing (MonoDomain *domain, MonoObject **reflection_missing)
{
if (!*reflection_missing)
*reflection_missing = mono_get_reflection_missing_object (domain);
return *reflection_missing;
}
/*
* mono_param_get_objects:
* @domain: an app domain
* @method: a method
*
* Return a System.Reflection.ParameterInfo array object representing the parameters
* in the method @method.
*/
MonoArray*
mono_param_get_objects_internal (MonoDomain *domain, MonoMethod *method, MonoClass *refclass)
{
static MonoClass *System_Reflection_ParameterInfo;
static MonoClass *System_Reflection_ParameterInfo_array;
MonoError error;
MonoArray *res = NULL;
MonoReflectionMethod *member = NULL;
MonoReflectionParameter *param = NULL;
char **names, **blobs = NULL;
guint32 *types = NULL;
MonoType *type = NULL;
MonoObject *dbnull = NULL;
MonoObject *missing = NULL;
MonoMarshalSpec **mspecs;
MonoMethodSignature *sig;
MonoVTable *pinfo_vtable;
int i;
if (!System_Reflection_ParameterInfo_array) {
MonoClass *klass;
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ParameterInfo");
mono_memory_barrier ();
System_Reflection_ParameterInfo = klass;
klass = mono_array_class_get (klass, 1);
mono_memory_barrier ();
System_Reflection_ParameterInfo_array = klass;
}
sig = mono_method_signature_checked (method, &error);
if (!mono_error_ok (&error))
mono_error_raise_exception (&error);
if (!sig->param_count)
return mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), 0);
/* Note: the cache is keyed on the address of the signature inside the method,
 * since we already cache MethodInfos with the method itself as key.
 */
CHECK_OBJECT (MonoArray*, &(method->signature), refclass);
member = mono_method_get_object (domain, method, refclass);
names = g_new (char *, sig->param_count);
mono_method_get_param_names (method, (const char **) names);
mspecs = g_new (MonoMarshalSpec*, sig->param_count + 1);
mono_method_get_marshal_info (method, mspecs);
res = mono_array_new_specific (mono_class_vtable (domain, System_Reflection_ParameterInfo_array), sig->param_count);
pinfo_vtable = mono_class_vtable (domain, System_Reflection_ParameterInfo);
for (i = 0; i < sig->param_count; ++i) {
param = (MonoReflectionParameter *)mono_object_new_specific (pinfo_vtable);
MONO_OBJECT_SETREF (param, ClassImpl, mono_type_get_object (domain, sig->params [i]));
MONO_OBJECT_SETREF (param, MemberImpl, (MonoObject*)member);
MONO_OBJECT_SETREF (param, NameImpl, mono_string_new (domain, names [i]));
param->PositionImpl = i;
param->AttrsImpl = sig->params [i]->attrs;
if (!(param->AttrsImpl & PARAM_ATTRIBUTE_HAS_DEFAULT)) {
if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
else
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
} else {
if (!blobs) {
blobs = g_new0 (char *, sig->param_count);
types = g_new0 (guint32, sig->param_count);
get_default_param_value_blobs (method, blobs, types);
}
/* Build MonoType for the type from the Constant Table */
if (!type)
type = g_new0 (MonoType, 1);
type->type = types [i];
type->data.klass = NULL;
if (types [i] == MONO_TYPE_CLASS)
type->data.klass = mono_defaults.object_class;
else if ((sig->params [i]->type == MONO_TYPE_VALUETYPE) && sig->params [i]->data.klass->enumtype) {
/* For enums, types [i] contains the base type */
type->type = MONO_TYPE_VALUETYPE;
type->data.klass = mono_class_from_mono_type (sig->params [i]);
} else
type->data.klass = mono_class_from_mono_type (type);
MONO_OBJECT_SETREF (param, DefaultValueImpl, mono_get_object_from_blob (domain, type, blobs [i]));
/* Type in the Constant table is MONO_TYPE_CLASS for nulls */
if (types [i] != MONO_TYPE_CLASS && !param->DefaultValueImpl) {
if (param->AttrsImpl & PARAM_ATTRIBUTE_OPTIONAL)
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_reflection_missing (domain, &missing));
else
MONO_OBJECT_SETREF (param, DefaultValueImpl, get_dbnull (domain, &dbnull));
}
}
if (mspecs [i + 1])
MONO_OBJECT_SETREF (param, MarshalAsImpl, (MonoObject*)mono_reflection_marshal_from_marshal_spec (domain, method->klass, mspecs [i + 1]));
mono_array_setref (res, i, param);
}
g_free (names);
g_free (blobs);
g_free (types);
g_free (type);
for (i = mono_method_signature (method)->param_count; i >= 0; i--)
if (mspecs [i])
mono_metadata_free_marshal_spec (mspecs [i]);
g_free (mspecs);
CACHE_OBJECT (MonoArray *, &(method->signature), res, refclass);
}
MonoArray*
mono_param_get_objects (MonoDomain *domain, MonoMethod *method)
{
return mono_param_get_objects_internal (domain, method, NULL);
}
/*
* mono_method_body_get_object:
* @domain: an app domain
* @method: a method
*
* Return a System.Reflection.MethodBody object representing the method @method.
*/
MonoReflectionMethodBody*
mono_method_body_get_object (MonoDomain *domain, MonoMethod *method)
{
static MonoClass *System_Reflection_MethodBody = NULL;
static MonoClass *System_Reflection_LocalVariableInfo = NULL;
static MonoClass *System_Reflection_ExceptionHandlingClause = NULL;
MonoReflectionMethodBody *ret;
MonoMethodHeader *header;
MonoImage *image;
guint32 method_rva, local_var_sig_token;
char *ptr;
unsigned char format, flags;
int i;
/* for compatibility with .net */
if (method->dynamic)
mono_raise_exception (mono_get_exception_invalid_operation (NULL));
if (!System_Reflection_MethodBody)
System_Reflection_MethodBody = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "MethodBody");
if (!System_Reflection_LocalVariableInfo)
System_Reflection_LocalVariableInfo = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "LocalVariableInfo");
if (!System_Reflection_ExceptionHandlingClause)
System_Reflection_ExceptionHandlingClause = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "ExceptionHandlingClause");
CHECK_OBJECT (MonoReflectionMethodBody *, method, NULL);
if ((method->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(method->flags & METHOD_ATTRIBUTE_ABSTRACT) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) ||
(method->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME))
return NULL;
image = method->klass->image;
header = mono_method_get_header (method);
if (!image->dynamic) {
/* Obtain local vars signature token */
method_rva = mono_metadata_decode_row_col (&image->tables [MONO_TABLE_METHOD], mono_metadata_token_index (method->token) - 1, MONO_METHOD_RVA);
ptr = mono_image_rva_map (image, method_rva);
flags = *(const unsigned char *) ptr;
format = flags & METHOD_HEADER_FORMAT_MASK;
switch (format) {
case METHOD_HEADER_TINY_FORMAT:
local_var_sig_token = 0;
break;
case METHOD_HEADER_FAT_FORMAT:
ptr += 2;	/* skip flags and header size */
ptr += 2;	/* skip max stack */
ptr += 4;	/* skip code size */
local_var_sig_token = read32 (ptr);
break;
default:
g_assert_not_reached ();
}
} else
local_var_sig_token = 0; //FIXME
ret = (MonoReflectionMethodBody*)mono_object_new (domain, System_Reflection_MethodBody);
ret->init_locals = header->init_locals;
ret->max_stack = header->max_stack;
ret->local_var_sig_token = local_var_sig_token;
MONO_OBJECT_SETREF (ret, il, mono_array_new_cached (domain, mono_defaults.byte_class, header->code_size));
memcpy (mono_array_addr (ret->il, guint8, 0), header->code, header->code_size);
/* Locals */
MONO_OBJECT_SETREF (ret, locals, mono_array_new_cached (domain, System_Reflection_LocalVariableInfo, header->num_locals));
for (i = 0; i < header->num_locals; ++i) {
MonoReflectionLocalVariableInfo *info = (MonoReflectionLocalVariableInfo*)mono_object_new (domain, System_Reflection_LocalVariableInfo);
MONO_OBJECT_SETREF (info, local_type, mono_type_get_object (domain, header->locals [i]));
info->is_pinned = header->locals [i]->pinned;
info->local_index = i;
mono_array_setref (ret->locals, i, info);
}
/* Exceptions */
MONO_OBJECT_SETREF (ret, clauses, mono_array_new_cached (domain, System_Reflection_ExceptionHandlingClause, header->num_clauses));
for (i = 0; i < header->num_clauses; ++i) {
MonoReflectionExceptionHandlingClause *info = (MonoReflectionExceptionHandlingClause*)mono_object_new (domain, System_Reflection_ExceptionHandlingClause);
MonoExceptionClause *clause = &header->clauses [i];
info->flags = clause->flags;
info->try_offset = clause->try_offset;
info->try_length = clause->try_len;
info->handler_offset = clause->handler_offset;
info->handler_length = clause->handler_len;
if (clause->flags == MONO_EXCEPTION_CLAUSE_FILTER)
info->filter_offset = clause->data.filter_offset;
else if (clause->data.catch_class)
MONO_OBJECT_SETREF (info, catch_type, mono_type_get_object (mono_domain_get (), &clause->data.catch_class->byval_arg));
mono_array_setref (ret->clauses, i, info);
}
mono_metadata_free_mh (header);
CACHE_OBJECT (MonoReflectionMethodBody *, method, ret, NULL);
return ret;
}
/**
* mono_get_dbnull_object:
* @domain: Domain where the object lives
*
* Returns the System.DBNull.Value singleton object
*
* Used as the value for ParameterInfo.DefaultValue
*/
MonoObject *
mono_get_dbnull_object (MonoDomain *domain)
{
MonoObject *obj;
static MonoClassField *dbnull_value_field = NULL;
if (!dbnull_value_field) {
MonoClass *dbnull_klass;
dbnull_klass = mono_class_from_name (mono_defaults.corlib, "System", "DBNull");
mono_class_init (dbnull_klass);
dbnull_value_field = mono_class_get_field_from_name (dbnull_klass, "Value");
g_assert (dbnull_value_field);
}
obj = mono_field_get_value_object (domain, dbnull_value_field, NULL);
g_assert (obj);
return obj;
}
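/*
 * get_default_param_value_blobs:
 *
 * For every parameter of METHOD that carries a default value, store a
 * pointer to the raw blob in BLOBS and the constant's type (a MONO_TYPE_*
 * value) in TYPES, both indexed by parameter position. For dynamic images
 * the values come from the image's method_aux_hash; otherwise they are
 * decoded from the Param and Constant metadata tables.
 */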
static void
get_default_param_value_blobs (MonoMethod *method, char **blobs, guint32 *types)
{
guint32 param_index, i, lastp, crow = 0;
guint32 param_cols [MONO_PARAM_SIZE], const_cols [MONO_CONSTANT_SIZE];
gint32 idx;
MonoClass *klass = method->klass;
MonoImage *image = klass->image;
MonoMethodSignature *methodsig = mono_method_signature (method);
MonoTableInfo *constt;
MonoTableInfo *methodt;
MonoTableInfo *paramt;
if (!methodsig->param_count)
return;
mono_class_init (klass);
if (klass->image->dynamic) {
MonoReflectionMethodAux *aux;
if (method->is_inflated)
method = ((MonoMethodInflated*)method)->declaring;
aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (aux && aux->param_defaults) {
memcpy (blobs, &(aux->param_defaults [1]), methodsig->param_count * sizeof (char*));
memcpy (types, &(aux->param_default_types [1]), methodsig->param_count * sizeof (guint32));
}
return;
}
methodt = &klass->image->tables [MONO_TABLE_METHOD];
paramt = &klass->image->tables [MONO_TABLE_PARAM];
constt = &image->tables [MONO_TABLE_CONSTANT];
idx = mono_method_get_index (method) - 1;
g_assert (idx != -1);
param_index = mono_metadata_decode_row_col (methodt, idx, MONO_METHOD_PARAMLIST);
if (idx + 1 < methodt->rows)
lastp = mono_metadata_decode_row_col (methodt, idx + 1, MONO_METHOD_PARAMLIST);
else
lastp = paramt->rows + 1;
for (i = param_index; i < lastp; ++i) {
guint32 paramseq;
mono_metadata_decode_row (paramt, i - 1, param_cols, MONO_PARAM_SIZE);
paramseq = param_cols [MONO_PARAM_SEQUENCE];
if (!(param_cols [MONO_PARAM_FLAGS] & PARAM_ATTRIBUTE_HAS_DEFAULT))
continue;
crow = mono_metadata_get_constant_index (image, MONO_TOKEN_PARAM_DEF | i, crow + 1);
if (!crow) {
continue;
}
mono_metadata_decode_row (constt, crow - 1, const_cols, MONO_CONSTANT_SIZE);
blobs [paramseq - 1] = (gpointer) mono_metadata_blob_heap (image, const_cols [MONO_CONSTANT_VALUE]);
types [paramseq - 1] = const_cols [MONO_CONSTANT_TYPE];
}
return;
}
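/*
 * mono_get_object_from_blob:
 *
 * Decode the serialized constant BLOB of type TYPE into a managed object:
 * a freshly boxed value for valuetypes (using the underlying basetype for
 * enums), or the decoded reference otherwise. Returns NULL if BLOB is NULL
 * or the blob cannot be decoded.
 */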
MonoObject *
mono_get_object_from_blob (MonoDomain *domain, MonoType *type, const char *blob)
{
void *retval;
MonoClass *klass;
MonoObject *object;
MonoType *basetype = type;
if (!blob)
return NULL;
klass = mono_class_from_mono_type (type);
if (klass->valuetype) {
object = mono_object_new (domain, klass);
retval = ((gchar *) object + sizeof (MonoObject));
if (klass->enumtype)
basetype = mono_class_enum_basetype (klass);
} else {
retval = &object;
}
if (!mono_get_constant_value_from_blob (domain, basetype->type, blob, retval))
return object;
else
return NULL;
}
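/*
 * assembly_name_to_aname parses display names of the form accepted by
 * Assembly.Load, e.g. (a representative example, not taken from a real
 * call site):
 *
 *   mscorlib, Version=2.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
 *
 * NAME is parsed destructively: separators are overwritten with NULs and
 * the MonoAssemblyName fields point into the caller's buffer.
 */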
static int
assembly_name_to_aname (MonoAssemblyName *assembly, char *p) {
int found_sep;
char *s;
gboolean quoted = FALSE;
memset (assembly, 0, sizeof (MonoAssemblyName));
assembly->culture = "";
memset (assembly->public_key_token, 0, MONO_PUBLIC_KEY_TOKEN_LENGTH);
if (*p == '"') {
quoted = TRUE;
p++;
}
assembly->name = p;
while (*p && (isalnum (*p) || *p == '.' || *p == '-' || *p == '_' || *p == '$' || *p == '@' || g_ascii_isspace (*p)))
p++;
if (quoted) {
if (*p != '"')
return 1;
*p = 0;
p++;
}
if (*p != ',')
return 1;
*p = 0;
/* Remove trailing whitespace */
s = p - 1;
while (*s && g_ascii_isspace (*s))
*s-- = 0;
p ++;
while (g_ascii_isspace (*p))
p++;
while (*p) {
if (*p == 'V' && g_ascii_strncasecmp (p, "Version=", 8) == 0) {
p += 8;
assembly->major = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->minor = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->build = strtoul (p, &s, 10);
if (s == p || *s != '.')
return 1;
p = ++s;
assembly->revision = strtoul (p, &s, 10);
if (s == p)
return 1;
p = s;
} else if (*p == 'C' && g_ascii_strncasecmp (p, "Culture=", 8) == 0) {
p += 8;
if (g_ascii_strncasecmp (p, "neutral", 7) == 0) {
assembly->culture = "";
p += 7;
} else {
assembly->culture = p;
while (*p && *p != ',') {
p++;
}
}
} else if (*p == 'P' && g_ascii_strncasecmp (p, "PublicKeyToken=", 15) == 0) {
p += 15;
if (strncmp (p, "null", 4) == 0) {
p += 4;
} else {
int len;
gchar *start = p;
while (*p && *p != ',') {
p++;
}
len = (p - start + 1);
if (len > MONO_PUBLIC_KEY_TOKEN_LENGTH)
len = MONO_PUBLIC_KEY_TOKEN_LENGTH;
g_strlcpy ((char*)assembly->public_key_token, start, len);
}
} else {
while (*p && *p != ',')
p++;
}
found_sep = 0;
while (g_ascii_isspace (*p) || *p == ',') {
*p++ = 0;
found_sep = 1;
continue;
}
/* failed */
if (!found_sep)
return 1;
}
return 0;
}
/*
* mono_reflection_parse_type:
* @name: type name
*
* Parse a type name as accepted by the GetType () method and output the info
* extracted in the info structure.
* The name param will be mangled, so make a copy before passing it to this function.
* The fields in info remain valid only as long as the memory pointed to by name is valid.
*
* See also mono_type_get_name () below.
*
* Returns: 0 on parse error.
*/
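/*
 * A few examples of names this grammar accepts (illustrative only):
 *
 *   System.Int32                     simple type
 *   System.Int32&                    byref
 *   System.Int32*                    pointer
 *   System.Int32[,]                  two-dimensional array
 *   Outer+Nested                     nested type
 *   System.Collections.Generic.List`1[[System.Int32]]
 *                                    generic instance with an fqn argument
 *   Foo.Bar, MyAssembly, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null
 *                                    assembly-qualified name
 */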
static int
_mono_reflection_parse_type (char *name, char **endptr, gboolean is_recursed,
MonoTypeNameParse *info)
{
char *start, *p, *w, *temp, *last_point, *startn;
int in_modifiers = 0;
int isbyref = 0, rank, arity = 0, i;
start = p = w = name;
//FIXME could we just zero the whole struct? memset (&info, 0, sizeof (MonoTypeNameParse))
memset (&info->assembly, 0, sizeof (MonoAssemblyName));
info->name = info->name_space = NULL;
info->nested = NULL;
info->modifiers = NULL;
info->type_arguments = NULL;
/* last_point separates the namespace from the name */
last_point = NULL;
/* Skips spaces */
while (*p == ' ') p++, start++, w++, name++;
while (*p) {
switch (*p) {
case '+':
*p = 0; /* NULL terminate the name */
startn = p + 1;
info->nested = g_list_append (info->nested, startn);
/* we have parsed the nesting namespace + name */
if (info->name)
break;
if (last_point) {
info->name_space = start;
*last_point = 0;
info->name = last_point + 1;
} else {
info->name_space = (char *)"";
info->name = start;
}
break;
case '.':
last_point = p;
break;
case '\\':
++p;
break;
case '&':
case '*':
case '[':
case ',':
case ']':
in_modifiers = 1;
break;
case '`':
++p;
i = strtol (p, &temp, 10);
arity += i;
if (p == temp)
return 0;
p = temp-1;
break;
default:
break;
}
if (in_modifiers)
break;
// *w++ = *p++;
p++;
}
if (!info->name) {
if (last_point) {
info->name_space = start;
*last_point = 0;
info->name = last_point + 1;
} else {
info->name_space = (char *)"";
info->name = start;
}
}
while (*p) {
switch (*p) {
case '&':
if (isbyref) /* only one level allowed by the spec */
return 0;
isbyref = 1;
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (0));
*p++ = 0;
break;
case '*':
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-1));
*p++ = 0;
break;
case '[':
if (arity != 0) {
*p++ = 0;
info->type_arguments = g_ptr_array_new ();
for (i = 0; i < arity; i++) {
MonoTypeNameParse *subinfo = g_new0 (MonoTypeNameParse, 1);
gboolean fqname = FALSE;
g_ptr_array_add (info->type_arguments, subinfo);
if (*p == '[') {
p++;
fqname = TRUE;
}
if (!_mono_reflection_parse_type (p, &p, TRUE, subinfo))
return 0;
/*MS is lenient on [] delimited parameters that aren't fqn - and F# uses them.*/
if (fqname && (*p != ']')) {
char *aname;
if (*p != ',')
return 0;
*p++ = 0;
aname = p;
while (*p && (*p != ']'))
p++;
if (*p != ']')
return 0;
*p++ = 0;
while (*aname) {
if (g_ascii_isspace (*aname)) {
++aname;
continue;
}
break;
}
if (!*aname ||
!assembly_name_to_aname (&subinfo->assembly, aname))
return 0;
} else if (fqname && (*p == ']')) {
*p++ = 0;
}
if (i + 1 < arity) {
if (*p != ',')
return 0;
} else {
if (*p != ']')
return 0;
}
*p++ = 0;
}
arity = 0;
break;
}
rank = 1;
*p++ = 0;
while (*p) {
if (*p == ']')
break;
if (*p == ',')
rank++;
else if (*p == '*') /* '*' means unknown lower bound */
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (-2));
else
return 0;
++p;
}
if (*p++ != ']')
return 0;
info->modifiers = g_list_append (info->modifiers, GUINT_TO_POINTER (rank));
break;
case ']':
if (is_recursed)
goto end;
return 0;
case ',':
if (is_recursed)
goto end;
*p++ = 0;
while (*p) {
if (g_ascii_isspace (*p)) {
++p;
continue;
}
break;
}
if (!*p)
return 0; /* missing assembly name */
if (!assembly_name_to_aname (&info->assembly, p))
return 0;
break;
default:
return 0;
}
if (info->assembly.name)
break;
}
// *w = 0; /* terminate class name */
end:
if (!info->name || !*info->name)
return 0;
if (endptr)
*endptr = p;
/* add other consistency checks */
return 1;
}
int
mono_reflection_parse_type (char *name, MonoTypeNameParse *info)
{
return _mono_reflection_parse_type (name, NULL, FALSE, info);
}
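/*
 * Usage sketch for the parser above (hypothetical caller, not part of this
 * file; mono_reflection_type_from_name () below follows the same pattern).
 * The buffer is mangled in place, hence the copy:
 *
 *	MonoTypeNameParse info;
 *	char *tmp = g_strdup ("System.Collections.Generic.Dictionary`2[System.String,System.Int32]");
 *	if (mono_reflection_parse_type (tmp, &info)) {
 *		... inspect info.name_space, info.name, info.type_arguments ...
 *	}
 *	mono_reflection_free_type_info (&info);
 *	g_free (tmp);
 */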
static MonoType*
_mono_reflection_get_type_from_info (MonoTypeNameParse *info, MonoImage *image, gboolean ignorecase)
{
gboolean type_resolve = FALSE;
MonoType *type;
MonoImage *rootimage = image;
if (info->assembly.name) {
MonoAssembly *assembly = mono_assembly_loaded (&info->assembly);
if (!assembly && image && image->assembly && mono_assembly_names_equal (&info->assembly, &image->assembly->aname))
/*
* This could happen in the AOT compiler case when the search hook is not
* installed.
*/
assembly = image->assembly;
if (!assembly) {
/* then we must load the assembly ourselves - see #60439 */
assembly = mono_assembly_load (&info->assembly, NULL, NULL);
if (!assembly)
return NULL;
}
image = assembly->image;
} else if (!image) {
image = mono_defaults.corlib;
}
type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
if (type == NULL && !info->assembly.name && image != mono_defaults.corlib) {
image = mono_defaults.corlib;
type = mono_reflection_get_type_with_rootimage (rootimage, image, info, ignorecase, &type_resolve);
}
return type;
}
static MonoType*
mono_reflection_get_type_internal (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase)
{
MonoClass *klass;
GList *mod;
int modval;
gboolean bounded = FALSE;
if (!image)
image = mono_defaults.corlib;
if (ignorecase)
klass = mono_class_from_name_case (image, info->name_space, info->name);
else
klass = mono_class_from_name (image, info->name_space, info->name);
if (!klass)
return NULL;
for (mod = info->nested; mod; mod = mod->next) {
gpointer iter = NULL;
MonoClass *parent;
parent = klass;
mono_class_init (parent);
while ((klass = mono_class_get_nested_types (parent, &iter))) {
if (ignorecase) {
if (mono_utf8_strcasecmp (klass->name, mod->data) == 0)
break;
} else {
if (strcmp (klass->name, mod->data) == 0)
break;
}
}
if (!klass)
break;
}
if (!klass)
return NULL;
if (info->type_arguments) {
MonoType **type_args = g_new0 (MonoType *, info->type_arguments->len);
MonoReflectionType *the_type;
MonoType *instance;
int i;
for (i = 0; i < info->type_arguments->len; i++) {
MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);
type_args [i] = _mono_reflection_get_type_from_info (subinfo, rootimage, ignorecase);
if (!type_args [i]) {
g_free (type_args);
return NULL;
}
}
the_type = mono_type_get_object (mono_domain_get (), &klass->byval_arg);
instance = mono_reflection_bind_generic_parameters (
the_type, info->type_arguments->len, type_args);
g_free (type_args);
if (!instance)
return NULL;
klass = mono_class_from_mono_type (instance);
}
for (mod = info->modifiers; mod; mod = mod->next) {
modval = GPOINTER_TO_UINT (mod->data);
if (!modval) { /* byref: must be last modifier */
return &klass->this_arg;
} else if (modval == -1) {
klass = mono_ptr_class_get (&klass->byval_arg);
} else if (modval == -2) {
bounded = TRUE;
} else { /* array rank */
klass = mono_bounded_array_class_get (klass, modval, bounded);
}
}
return &klass->byval_arg;
}
/*
* mono_reflection_get_type:
* @image: a metadata context
* @info: type description structure
* @ignorecase: flag for case-insensitive string compares
 * @type_resolve: whether the TypeResolve event has already been tried (set to avoid raising it again)
*
* Build a MonoType from the type description in @info.
*
*/
MonoType*
mono_reflection_get_type (MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve) {
	return mono_reflection_get_type_with_rootimage (image, image, info, ignorecase, type_resolve);
}
static MonoType*
mono_reflection_get_type_internal_dynamic (MonoImage *rootimage, MonoAssembly *assembly, MonoTypeNameParse *info, gboolean ignorecase)
{
MonoReflectionAssemblyBuilder *abuilder;
MonoType *type;
int i;
g_assert (assembly->dynamic);
abuilder = (MonoReflectionAssemblyBuilder*)mono_assembly_get_object (((MonoDynamicAssembly*)assembly)->domain, assembly);
/* Enumerate all modules */
type = NULL;
if (abuilder->modules) {
for (i = 0; i < mono_array_length (abuilder->modules); ++i) {
MonoReflectionModuleBuilder *mb = mono_array_get (abuilder->modules, MonoReflectionModuleBuilder*, i);
type = mono_reflection_get_type_internal (rootimage, &mb->dynamic_image->image, info, ignorecase);
if (type)
break;
}
}
if (!type && abuilder->loaded_modules) {
for (i = 0; i < mono_array_length (abuilder->loaded_modules); ++i) {
MonoReflectionModule *mod = mono_array_get (abuilder->loaded_modules, MonoReflectionModule*, i);
type = mono_reflection_get_type_internal (rootimage, mod->image, info, ignorecase);
if (type)
break;
}
}
return type;
}
MonoType*
mono_reflection_get_type_with_rootimage (MonoImage *rootimage, MonoImage* image, MonoTypeNameParse *info, gboolean ignorecase, gboolean *type_resolve)
{
MonoType *type;
MonoReflectionAssembly *assembly;
GString *fullName;
GList *mod;
if (image && image->dynamic)
type = mono_reflection_get_type_internal_dynamic (rootimage, image->assembly, info, ignorecase);
else
type = mono_reflection_get_type_internal (rootimage, image, info, ignorecase);
if (type)
return type;
if (!mono_domain_has_type_resolve (mono_domain_get ()))
return NULL;
if (type_resolve) {
if (*type_resolve)
return NULL;
else
*type_resolve = TRUE;
}
/* Reconstruct the type name */
fullName = g_string_new ("");
if (info->name_space && (info->name_space [0] != '\0'))
g_string_printf (fullName, "%s.%s", info->name_space, info->name);
else
g_string_printf (fullName, "%s", info->name);
for (mod = info->nested; mod; mod = mod->next)
g_string_append_printf (fullName, "+%s", (char*)mod->data);
	assembly = mono_domain_try_type_resolve (mono_domain_get (), fullName->str, NULL);
if (assembly) {
if (assembly->assembly->dynamic)
type = mono_reflection_get_type_internal_dynamic (rootimage, assembly->assembly, info, ignorecase);
else
type = mono_reflection_get_type_internal (rootimage, assembly->assembly->image,
info, ignorecase);
}
g_string_free (fullName, TRUE);
return type;
}
void
mono_reflection_free_type_info (MonoTypeNameParse *info)
{
g_list_free (info->modifiers);
g_list_free (info->nested);
if (info->type_arguments) {
int i;
for (i = 0; i < info->type_arguments->len; i++) {
MonoTypeNameParse *subinfo = g_ptr_array_index (info->type_arguments, i);
mono_reflection_free_type_info (subinfo);
/*We free the subinfo since it is allocated by _mono_reflection_parse_type*/
g_free (subinfo);
}
g_ptr_array_free (info->type_arguments, TRUE);
}
}
/*
* mono_reflection_type_from_name:
* @name: type name.
* @image: a metadata context (can be NULL).
*
* Retrieves a MonoType from its @name. If the name is not fully qualified,
 * it defaults to getting the type from @image or, if @image is NULL or
 * loading from it fails, from corlib.
*
*/
MonoType*
mono_reflection_type_from_name (char *name, MonoImage *image)
{
MonoType *type = NULL;
MonoTypeNameParse info;
char *tmp;
/* Make a copy since parse_type modifies its argument */
tmp = g_strdup (name);
/*g_print ("requested type %s\n", str);*/
if (mono_reflection_parse_type (tmp, &info)) {
type = _mono_reflection_get_type_from_info (&info, image, FALSE);
}
g_free (tmp);
mono_reflection_free_type_info (&info);
return type;
}
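/*
 * Example (hypothetical): resolving a simple corlib type by name. The name
 * is not assembly qualified and no image is given, so the lookup falls back
 * to corlib as described above:
 *
 *	MonoType *t = mono_reflection_type_from_name ((char*)"System.Int32", NULL);
 */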
/*
* mono_reflection_get_token:
*
* Return the metadata token of OBJ which should be an object
* representing a metadata element.
*/
guint32
mono_reflection_get_token (MonoObject *obj)
{
MonoClass *klass;
guint32 token = 0;
klass = obj->vtable->klass;
if (strcmp (klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder *)obj;
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
} else if (strcmp (klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *mb = (MonoReflectionCtorBuilder *)obj;
token = mb->table_idx | MONO_TOKEN_METHOD_DEF;
} else if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)obj;
token = fb->table_idx | MONO_TOKEN_FIELD_DEF;
} else if (strcmp (klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)obj;
token = tb->table_idx | MONO_TOKEN_TYPE_DEF;
} else if (strcmp (klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
MonoClass *mc = mono_class_from_mono_type (type);
if (!mono_class_init (mc))
mono_raise_exception (mono_class_get_exception_for_failure (mc));
token = mc->type_token;
} else if (strcmp (klass->name, "MonoCMethod") == 0 ||
strcmp (klass->name, "MonoMethod") == 0 ||
strcmp (klass->name, "MonoGenericMethod") == 0 ||
strcmp (klass->name, "MonoGenericCMethod") == 0) {
MonoReflectionMethod *m = (MonoReflectionMethod *)obj;
if (m->method->is_inflated) {
MonoMethodInflated *inflated = (MonoMethodInflated *) m->method;
return inflated->declaring->token;
} else {
token = m->method->token;
}
} else if (strcmp (klass->name, "MonoField") == 0) {
MonoReflectionField *f = (MonoReflectionField*)obj;
if (is_field_on_inst (f->field)) {
MonoDynamicGenericClass *dgclass = (MonoDynamicGenericClass*)f->field->parent->generic_class;
int field_index = f->field - dgclass->fields;
MonoObject *obj;
g_assert (field_index >= 0 && field_index < dgclass->count_fields);
obj = dgclass->field_objects [field_index];
return mono_reflection_get_token (obj);
}
token = mono_class_get_field_token (f->field);
} else if (strcmp (klass->name, "MonoProperty") == 0) {
MonoReflectionProperty *p = (MonoReflectionProperty*)obj;
token = mono_class_get_property_token (p->property);
} else if (strcmp (klass->name, "MonoEvent") == 0) {
MonoReflectionMonoEvent *p = (MonoReflectionMonoEvent*)obj;
token = mono_class_get_event_token (p->event);
} else if (strcmp (klass->name, "ParameterInfo") == 0) {
MonoReflectionParameter *p = (MonoReflectionParameter*)obj;
MonoClass *member_class = mono_object_class (p->MemberImpl);
g_assert (mono_class_is_reflection_method_or_constructor (member_class));
token = mono_method_get_param_token (((MonoReflectionMethod*)p->MemberImpl)->method, p->PositionImpl);
} else if (strcmp (klass->name, "Module") == 0 || strcmp (klass->name, "MonoModule") == 0) {
MonoReflectionModule *m = (MonoReflectionModule*)obj;
token = m->token;
} else if (strcmp (klass->name, "Assembly") == 0 || strcmp (klass->name, "MonoAssembly") == 0) {
token = mono_metadata_make_token (MONO_TABLE_ASSEMBLY, 1);
} else {
gchar *msg = g_strdup_printf ("MetadataToken is not supported for type '%s.%s'", klass->name_space, klass->name);
MonoException *ex = mono_get_exception_not_implemented (msg);
g_free (msg);
mono_raise_exception (ex);
}
return token;
}
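/*
 * load_cattr_value:
 *
 * Decode a single fixed or named argument value from a custom attribute
 * blob. For primitive and other value types the result is a g_malloc ()ed
 * buffer that the caller must free (see type_is_reference () and
 * free_param_data () below); for reference types it is a managed object.
 */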
static void*
load_cattr_value (MonoImage *image, MonoType *t, const char *p, const char **end)
{
int slen, type = t->type;
MonoClass *tklass = t->data.klass;
handle_enum:
switch (type) {
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN: {
MonoBoolean *bval = g_malloc (sizeof (MonoBoolean));
*bval = *p;
*end = p + 1;
return bval;
}
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2: {
guint16 *val = g_malloc (sizeof (guint16));
*val = read16 (p);
*end = p + 2;
return val;
}
#if SIZEOF_VOID_P == 4
case MONO_TYPE_U:
case MONO_TYPE_I:
#endif
case MONO_TYPE_R4:
case MONO_TYPE_U4:
case MONO_TYPE_I4: {
guint32 *val = g_malloc (sizeof (guint32));
*val = read32 (p);
*end = p + 4;
return val;
}
#if SIZEOF_VOID_P == 8
case MONO_TYPE_U: /* error out instead? this should probably not happen */
case MONO_TYPE_I:
#endif
case MONO_TYPE_U8:
case MONO_TYPE_I8: {
guint64 *val = g_malloc (sizeof (guint64));
*val = read64 (p);
*end = p + 8;
return val;
}
case MONO_TYPE_R8: {
double *val = g_malloc (sizeof (double));
readr8 (p, val);
*end = p + 8;
return val;
}
case MONO_TYPE_VALUETYPE:
if (t->data.klass->enumtype) {
type = mono_class_enum_basetype (t->data.klass)->type;
goto handle_enum;
} else {
MonoClass *k = t->data.klass;
if (mono_is_corlib_image (k->image) && strcmp (k->name_space, "System") == 0 && strcmp (k->name, "DateTime") == 0){
guint64 *val = g_malloc (sizeof (guint64));
*val = read64 (p);
*end = p + 8;
return val;
}
}
g_error ("generic valutype %s not handled in custom attr value decoding", t->data.klass->name);
break;
case MONO_TYPE_STRING:
if (*p == (char)0xFF) {
*end = p + 1;
return NULL;
}
slen = mono_metadata_decode_value (p, &p);
*end = p + slen;
return mono_string_new_len (mono_domain_get (), p, slen);
case MONO_TYPE_CLASS: {
char *n;
MonoType *t;
if (*p == (char)0xFF) {
*end = p + 1;
return NULL;
}
handle_type:
slen = mono_metadata_decode_value (p, &p);
n = g_memdup (p, slen + 1);
n [slen] = 0;
t = mono_reflection_type_from_name (n, image);
if (!t)
g_warning ("Cannot load type '%s'", n);
g_free (n);
*end = p + slen;
if (t)
return mono_type_get_object (mono_domain_get (), t);
else
return NULL;
}
case MONO_TYPE_OBJECT: {
char subt = *p++;
MonoObject *obj;
MonoClass *subc = NULL;
void *val;
if (subt == 0x50) {
goto handle_type;
} else if (subt == 0x0E) {
type = MONO_TYPE_STRING;
goto handle_enum;
} else if (subt == 0x1D) {
MonoType simple_type = {{0}};
int etype = *p;
p ++;
if (etype == 0x51)
/* See Partition II, Appendix B3 */
etype = MONO_TYPE_OBJECT;
type = MONO_TYPE_SZARRAY;
simple_type.type = etype;
tklass = mono_class_from_mono_type (&simple_type);
goto handle_enum;
} else if (subt == 0x55) {
char *n;
MonoType *t;
slen = mono_metadata_decode_value (p, &p);
n = g_memdup (p, slen + 1);
n [slen] = 0;
t = mono_reflection_type_from_name (n, image);
if (!t)
g_error ("Cannot load type '%s'", n);
g_free (n);
p += slen;
subc = mono_class_from_mono_type (t);
} else if (subt >= MONO_TYPE_BOOLEAN && subt <= MONO_TYPE_R8) {
MonoType simple_type = {{0}};
simple_type.type = subt;
subc = mono_class_from_mono_type (&simple_type);
} else {
g_error ("Unknown type 0x%02x for object type encoding in custom attr", subt);
}
val = load_cattr_value (image, &subc->byval_arg, p, end);
obj = mono_object_new (mono_domain_get (), subc);
g_assert (!subc->has_references);
memcpy ((char*)obj + sizeof (MonoObject), val, mono_class_value_size (subc, NULL));
g_free (val);
return obj;
}
case MONO_TYPE_SZARRAY: {
MonoArray *arr;
guint32 i, alen, basetype;
alen = read32 (p);
p += 4;
if (alen == 0xffffffff) {
*end = p;
return NULL;
}
arr = mono_array_new (mono_domain_get(), tklass, alen);
basetype = tklass->byval_arg.type;
if (basetype == MONO_TYPE_VALUETYPE && tklass->enumtype)
basetype = mono_class_enum_basetype (tklass)->type;
switch (basetype)
{
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_BOOLEAN:
for (i = 0; i < alen; i++) {
MonoBoolean val = *p++;
mono_array_set (arr, MonoBoolean, i, val);
}
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
for (i = 0; i < alen; i++) {
guint16 val = read16 (p);
mono_array_set (arr, guint16, i, val);
p += 2;
}
break;
case MONO_TYPE_R4:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
for (i = 0; i < alen; i++) {
guint32 val = read32 (p);
mono_array_set (arr, guint32, i, val);
p += 4;
}
break;
case MONO_TYPE_R8:
for (i = 0; i < alen; i++) {
double val;
readr8 (p, &val);
mono_array_set (arr, double, i, val);
p += 8;
}
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
for (i = 0; i < alen; i++) {
guint64 val = read64 (p);
mono_array_set (arr, guint64, i, val);
p += 8;
}
break;
case MONO_TYPE_CLASS:
case MONO_TYPE_OBJECT:
case MONO_TYPE_STRING:
for (i = 0; i < alen; i++) {
MonoObject *item = load_cattr_value (image, &tklass->byval_arg, p, &p);
mono_array_setref (arr, i, item);
}
break;
default:
g_error ("Type 0x%02x not handled in custom attr array decoding", basetype);
}
*end=p;
return arr;
}
default:
g_error ("Type 0x%02x not handled in custom attr value decoding", type);
}
return NULL;
}
static MonoObject*
create_cattr_typed_arg (MonoType *t, MonoObject *val)
{
static MonoClass *klass;
static MonoMethod *ctor;
MonoObject *retval;
void *params [2], *unboxed;
if (!klass)
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeTypedArgument");
if (!ctor)
ctor = mono_class_get_method_from_name (klass, ".ctor", 2);
params [0] = mono_type_get_object (mono_domain_get (), t);
params [1] = val;
retval = mono_object_new (mono_domain_get (), klass);
unboxed = mono_object_unbox (retval);
mono_runtime_invoke (ctor, unboxed, params, NULL);
return retval;
}
static MonoObject*
create_cattr_named_arg (void *minfo, MonoObject *typedarg)
{
static MonoClass *klass;
static MonoMethod *ctor;
MonoObject *retval;
void *unboxed, *params [2];
if (!klass)
klass = mono_class_from_name (mono_defaults.corlib, "System.Reflection", "CustomAttributeNamedArgument");
if (!ctor)
ctor = mono_class_get_method_from_name (klass, ".ctor", 2);
params [0] = minfo;
params [1] = typedarg;
retval = mono_object_new (mono_domain_get (), klass);
unboxed = mono_object_unbox (retval);
mono_runtime_invoke (ctor, unboxed, params, NULL);
return retval;
}
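/*
 * Both helpers above construct a managed valuetype from native code:
 * allocate the boxed instance, unbox it to reach the raw data, then invoke
 * the two-argument .ctor directly on the unboxed storage. Passing unboxed
 * storage to mono_runtime_invoke () is the standard way to run a valuetype
 * constructor from the runtime.
 */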
static gboolean
type_is_reference (MonoType *type)
{
switch (type->type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_CHAR:
case MONO_TYPE_U:
case MONO_TYPE_I:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_U8:
case MONO_TYPE_I8:
case MONO_TYPE_R8:
case MONO_TYPE_R4:
case MONO_TYPE_VALUETYPE:
return FALSE;
default:
return TRUE;
}
}
static void
free_param_data (MonoMethodSignature *sig, void **params) {
int i;
for (i = 0; i < sig->param_count; ++i) {
if (!type_is_reference (sig->params [i]))
g_free (params [i]);
}
}
/*
* Find the field index in the metadata FieldDef table.
*/
static guint32
find_field_index (MonoClass *klass, MonoClassField *field) {
int i;
for (i = 0; i < klass->field.count; ++i) {
if (field == &klass->fields [i])
return klass->field.first + 1 + i;
}
return 0;
}
/*
* Find the property index in the metadata Property table.
*/
static guint32
find_property_index (MonoClass *klass, MonoProperty *property) {
int i;
for (i = 0; i < klass->ext->property.count; ++i) {
if (property == &klass->ext->properties [i])
return klass->ext->property.first + 1 + i;
}
return 0;
}
/*
* Find the event index in the metadata Event table.
*/
static guint32
find_event_index (MonoClass *klass, MonoEvent *event) {
int i;
for (i = 0; i < klass->ext->event.count; ++i) {
if (event == &klass->ext->events [i])
return klass->ext->event.first + 1 + i;
}
return 0;
}
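/*
 * Layout of the custom attribute blob decoded by create_custom_attr () and
 * mono_reflection_create_custom_attr_data_args () below (ECMA-335 partition
 * II, 23.3):
 *
 *	u16  prolog, must be 0x0001
 *	...  fixed ctor arguments, encoded according to the ctor signature
 *	u16  NumNamed
 *	then, NumNamed times:
 *		u8        0x53 (FIELD) or 0x54 (PROPERTY)
 *		u8        element type of the value (for enums, 0x55 followed
 *		          by the enum type name; for SZARRAY, the element type)
 *		SerString member name
 *		...       the value itself
 */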
static MonoObject*
create_custom_attr (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len, MonoError *error)
{
const char *p = (const char*)data;
const char *named;
guint32 i, j, num_named;
MonoObject *attr;
void *params_buf [32];
void **params;
MonoMethodSignature *sig;
mono_error_init (error);
mono_class_init (method->klass);
if (!mono_verifier_verify_cattr_content (image, method, data, len, NULL)) {
mono_error_set_generic_error (error, "System.Reflection", "CustomAttributeFormatException", "Binary format of the specified custom attribute was invalid.");
return NULL;
}
if (len == 0) {
attr = mono_object_new (mono_domain_get (), method->klass);
mono_runtime_invoke (method, attr, NULL, NULL);
return attr;
}
if (len < 2 || read16 (p) != 0x0001) /* Prolog */
return NULL;
/*g_print ("got attr %s\n", method->klass->name);*/
sig = mono_method_signature (method);
if (sig->param_count < 32)
params = params_buf;
else
/* Allocate using GC so it gets GC tracking */
params = mono_gc_alloc_fixed (sig->param_count * sizeof (void*), NULL);
/* skip prolog */
p += 2;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
params [i] = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p);
}
named = p;
attr = mono_object_new (mono_domain_get (), method->klass);
mono_runtime_invoke (method, attr, params, NULL);
free_param_data (method->signature, params);
num_named = read16 (named);
named += 2;
for (j = 0; j < num_named; j++) {
gint name_len;
char *name, named_type, data_type;
named_type = *named++;
data_type = *named++; /* type of data */
if (data_type == MONO_TYPE_SZARRAY)
data_type = *named++;
if (data_type == MONO_TYPE_ENUM) {
gint type_len;
char *type_name;
type_len = mono_metadata_decode_blob_size (named, &named);
type_name = g_malloc (type_len + 1);
memcpy (type_name, named, type_len);
type_name [type_len] = 0;
named += type_len;
/* FIXME: lookup the type and check type consistency */
g_free (type_name);
}
name_len = mono_metadata_decode_blob_size (named, &named);
name = g_malloc (name_len + 1);
memcpy (name, named, name_len);
name [name_len] = 0;
named += name_len;
if (named_type == 0x53) {
MonoClassField *field = mono_class_get_field_from_name (mono_object_class (attr), name);
void *val = load_cattr_value (image, field->type, named, &named);
mono_field_set_value (attr, field, val);
if (!type_is_reference (field->type))
g_free (val);
} else if (named_type == 0x54) {
MonoProperty *prop;
void *pparams [1];
MonoType *prop_type;
prop = mono_class_get_property_from_name (mono_object_class (attr), name);
			/* can we have more than 1 arg in a custom attr named property? */
prop_type = prop->get? mono_method_signature (prop->get)->ret :
mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1];
pparams [0] = load_cattr_value (image, prop_type, named, &named);
mono_property_set_value (prop, attr, pparams, NULL);
if (!type_is_reference (prop_type))
g_free (pparams [0]);
}
g_free (name);
}
if (params != params_buf)
mono_gc_free_fixed (params);
return attr;
}
/*
* mono_reflection_create_custom_attr_data_args:
*
* Create an array of typed and named arguments from the cattr blob given by DATA.
* TYPED_ARGS and NAMED_ARGS will contain the objects representing the arguments,
* NAMED_ARG_INFO will contain information about the named arguments.
*/
void
mono_reflection_create_custom_attr_data_args (MonoImage *image, MonoMethod *method, const guchar *data, guint32 len, MonoArray **typed_args, MonoArray **named_args, CattrNamedArg **named_arg_info)
{
MonoArray *typedargs, *namedargs;
MonoClass *attrklass;
MonoDomain *domain;
const char *p = (const char*)data;
const char *named;
guint32 i, j, num_named;
CattrNamedArg *arginfo = NULL;
if (!mono_verifier_verify_cattr_content (image, method, data, len, NULL))
return;
mono_class_init (method->klass);
*typed_args = NULL;
*named_args = NULL;
*named_arg_info = NULL;
domain = mono_domain_get ();
if (len < 2 || read16 (p) != 0x0001) /* Prolog */
return;
typedargs = mono_array_new (domain, mono_get_object_class (), mono_method_signature (method)->param_count);
/* skip prolog */
p += 2;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
MonoObject *obj;
void *val;
val = load_cattr_value (image, mono_method_signature (method)->params [i], p, &p);
obj = type_is_reference (mono_method_signature (method)->params [i]) ?
val : mono_value_box (domain, mono_class_from_mono_type (mono_method_signature (method)->params [i]), val);
mono_array_setref (typedargs, i, obj);
if (!type_is_reference (mono_method_signature (method)->params [i]))
g_free (val);
}
named = p;
num_named = read16 (named);
namedargs = mono_array_new (domain, mono_get_object_class (), num_named);
named += 2;
attrklass = method->klass;
arginfo = g_new0 (CattrNamedArg, num_named);
*named_arg_info = arginfo;
for (j = 0; j < num_named; j++) {
gint name_len;
char *name, named_type, data_type;
named_type = *named++;
data_type = *named++; /* type of data */
if (data_type == MONO_TYPE_SZARRAY)
data_type = *named++;
if (data_type == MONO_TYPE_ENUM) {
gint type_len;
char *type_name;
type_len = mono_metadata_decode_blob_size (named, &named);
type_name = g_malloc (type_len + 1);
memcpy (type_name, named, type_len);
type_name [type_len] = 0;
named += type_len;
/* FIXME: lookup the type and check type consistency */
g_free (type_name);
}
name_len = mono_metadata_decode_blob_size (named, &named);
name = g_malloc (name_len + 1);
memcpy (name, named, name_len);
name [name_len] = 0;
named += name_len;
if (named_type == 0x53) {
MonoObject *obj;
MonoClassField *field = mono_class_get_field_from_name (attrklass, name);
void *val;
arginfo [j].type = field->type;
arginfo [j].field = field;
val = load_cattr_value (image, field->type, named, &named);
obj = type_is_reference (field->type) ? val : mono_value_box (domain, mono_class_from_mono_type (field->type), val);
mono_array_setref (namedargs, j, obj);
if (!type_is_reference (field->type))
g_free (val);
} else if (named_type == 0x54) {
MonoObject *obj;
MonoType *prop_type;
MonoProperty *prop = mono_class_get_property_from_name (attrklass, name);
void *val;
prop_type = prop->get? mono_method_signature (prop->get)->ret :
mono_method_signature (prop->set)->params [mono_method_signature (prop->set)->param_count - 1];
arginfo [j].type = prop_type;
arginfo [j].prop = prop;
val = load_cattr_value (image, prop_type, named, &named);
obj = type_is_reference (prop_type) ? val : mono_value_box (domain, mono_class_from_mono_type (prop_type), val);
mono_array_setref (namedargs, j, obj);
if (!type_is_reference (prop_type))
g_free (val);
}
g_free (name);
}
*typed_args = typedargs;
*named_args = namedargs;
}
void
mono_reflection_resolve_custom_attribute_data (MonoReflectionMethod *ref_method, MonoReflectionAssembly *assembly, gpointer data, guint32 len, MonoArray **ctor_args, MonoArray **named_args)
{
MonoDomain *domain;
MonoArray *typedargs, *namedargs;
MonoImage *image;
MonoMethod *method;
CattrNamedArg *arginfo;
int i;
*ctor_args = NULL;
*named_args = NULL;
if (len == 0)
return;
image = assembly->assembly->image;
method = ref_method->method;
domain = mono_object_domain (ref_method);
if (!mono_class_init (method->klass))
mono_raise_exception (mono_class_get_exception_for_failure (method->klass));
mono_reflection_create_custom_attr_data_args (image, method, data, len, &typedargs, &namedargs, &arginfo);
if (mono_loader_get_last_error ())
mono_raise_exception (mono_loader_error_prepare_exception (mono_loader_get_last_error ()));
if (!typedargs || !namedargs)
return;
for (i = 0; i < mono_method_signature (method)->param_count; ++i) {
MonoObject *obj = mono_array_get (typedargs, MonoObject*, i);
MonoObject *typedarg;
typedarg = create_cattr_typed_arg (mono_method_signature (method)->params [i], obj);
mono_array_setref (typedargs, i, typedarg);
}
for (i = 0; i < mono_array_length (namedargs); ++i) {
MonoObject *obj = mono_array_get (namedargs, MonoObject*, i);
MonoObject *typedarg, *namedarg, *minfo;
if (arginfo [i].prop)
minfo = (MonoObject*)mono_property_get_object (domain, NULL, arginfo [i].prop);
else
minfo = (MonoObject*)mono_field_get_object (domain, NULL, arginfo [i].field);
typedarg = create_cattr_typed_arg (arginfo [i].type, obj);
namedarg = create_cattr_named_arg (minfo, typedarg);
mono_array_setref (namedargs, i, namedarg);
}
*ctor_args = typedargs;
*named_args = namedargs;
}
static MonoObject*
create_custom_attr_data (MonoImage *image, MonoCustomAttrEntry *cattr)
{
static MonoMethod *ctor;
MonoDomain *domain;
MonoObject *attr;
void *params [4];
g_assert (image->assembly);
if (!ctor)
ctor = mono_class_get_method_from_name (mono_defaults.customattribute_data_class, ".ctor", 4);
domain = mono_domain_get ();
attr = mono_object_new (domain, mono_defaults.customattribute_data_class);
params [0] = mono_method_get_object (domain, cattr->ctor, NULL);
params [1] = mono_assembly_get_object (domain, image->assembly);
params [2] = (gpointer)&cattr->data;
params [3] = &cattr->data_size;
mono_runtime_invoke (ctor, attr, params, NULL);
return attr;
}
static MonoArray*
mono_custom_attrs_construct_by_type (MonoCustomAttrInfo *cinfo, MonoClass *attr_klass, MonoError *error)
{
MonoArray *result;
MonoObject *attr;
int i, n;
mono_error_init (error);
n = 0;
for (i = 0; i < cinfo->num_attrs; ++i) {
if (!attr_klass || mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass))
n ++;
}
result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, n);
n = 0;
for (i = 0; i < cinfo->num_attrs; ++i) {
if (!cinfo->attrs [i].ctor)
/* The cattr type is not finished yet */
/* We should include the type name but cinfo doesn't contain it */
mono_raise_exception (mono_get_exception_type_load (NULL, NULL));
if (!attr_klass || mono_class_is_assignable_from (attr_klass, cinfo->attrs [i].ctor->klass)) {
attr = create_custom_attr (cinfo->image, cinfo->attrs [i].ctor, cinfo->attrs [i].data, cinfo->attrs [i].data_size, error);
if (!mono_error_ok (error))
return result;
mono_array_setref (result, n, attr);
n ++;
}
}
return result;
}
MonoArray*
mono_custom_attrs_construct (MonoCustomAttrInfo *cinfo)
{
MonoError error;
return mono_custom_attrs_construct_by_type (cinfo, NULL, &error);
}
static MonoArray*
mono_custom_attrs_data_construct (MonoCustomAttrInfo *cinfo)
{
MonoArray *result;
MonoObject *attr;
int i;
result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, cinfo->num_attrs);
for (i = 0; i < cinfo->num_attrs; ++i) {
attr = create_custom_attr_data (cinfo->image, &cinfo->attrs [i]);
mono_array_setref (result, i, attr);
}
return result;
}
/**
* mono_custom_attrs_from_index:
*
* Returns: NULL if no attributes are found or if a loading error occurs.
*/
MonoCustomAttrInfo*
mono_custom_attrs_from_index (MonoImage *image, guint32 idx)
{
guint32 mtoken, i, len;
guint32 cols [MONO_CUSTOM_ATTR_SIZE];
MonoTableInfo *ca;
MonoCustomAttrInfo *ainfo;
GList *tmp, *list = NULL;
const char *data;
ca = &image->tables [MONO_TABLE_CUSTOMATTRIBUTE];
i = mono_metadata_custom_attrs_from_index (image, idx);
if (!i)
return NULL;
i --;
while (i < ca->rows) {
if (mono_metadata_decode_row_col (ca, i, MONO_CUSTOM_ATTR_PARENT) != idx)
break;
list = g_list_prepend (list, GUINT_TO_POINTER (i));
++i;
}
len = g_list_length (list);
if (!len)
return NULL;
ainfo = g_malloc0 (MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * len);
ainfo->num_attrs = len;
ainfo->image = image;
for (i = 0, tmp = list; i < len; ++i, tmp = tmp->next) {
mono_metadata_decode_row (ca, GPOINTER_TO_UINT (tmp->data), cols, MONO_CUSTOM_ATTR_SIZE);
mtoken = cols [MONO_CUSTOM_ATTR_TYPE] >> MONO_CUSTOM_ATTR_TYPE_BITS;
switch (cols [MONO_CUSTOM_ATTR_TYPE] & MONO_CUSTOM_ATTR_TYPE_MASK) {
case MONO_CUSTOM_ATTR_TYPE_METHODDEF:
mtoken |= MONO_TOKEN_METHOD_DEF;
break;
case MONO_CUSTOM_ATTR_TYPE_MEMBERREF:
mtoken |= MONO_TOKEN_MEMBER_REF;
break;
default:
g_error ("Unknown table for custom attr type %08x", cols [MONO_CUSTOM_ATTR_TYPE]);
break;
}
ainfo->attrs [i].ctor = mono_get_method (image, mtoken, NULL);
if (!ainfo->attrs [i].ctor) {
g_warning ("Can't find custom attr constructor image: %s mtoken: 0x%08x", image->name, mtoken);
g_list_free (list);
g_free (ainfo);
return NULL;
}
if (!mono_verifier_verify_cattr_blob (image, cols [MONO_CUSTOM_ATTR_VALUE], NULL)) {
/*FIXME raising an exception here doesn't make any sense*/
g_warning ("Invalid custom attribute blob on image %s for index %x", image->name, idx);
g_list_free (list);
g_free (ainfo);
return NULL;
}
data = mono_metadata_blob_heap (image, cols [MONO_CUSTOM_ATTR_VALUE]);
ainfo->attrs [i].data_size = mono_metadata_decode_value (data, &data);
ainfo->attrs [i].data = (guchar*)data;
}
g_list_free (list);
return ainfo;
}
MonoCustomAttrInfo*
mono_custom_attrs_from_method (MonoMethod *method)
{
guint32 idx;
/*
* An instantiated method has the same cattrs as the generic method definition.
*
* LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders
* Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization
*/
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (method->dynamic || method->klass->image->dynamic)
return lookup_custom_attr (method->klass->image, method);
if (!method->token)
/* Synthetic methods */
return NULL;
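	/*
	 * Build the HasCustomAttribute coded index used as the Parent key of
	 * the CustomAttribute table: the row index goes in the high bits, the
	 * target-table tag in the low MONO_CUSTOM_ATTR_BITS (ECMA-335
	 * partition II, 24.2.6).
	 */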
idx = mono_method_get_index (method);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_METHODDEF;
return mono_custom_attrs_from_index (method->klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_class (MonoClass *klass)
{
guint32 idx;
if (klass->generic_class)
klass = klass->generic_class->container_class;
if (klass->image->dynamic)
return lookup_custom_attr (klass->image, klass);
if (klass->byval_arg.type == MONO_TYPE_VAR || klass->byval_arg.type == MONO_TYPE_MVAR) {
idx = mono_metadata_token_index (klass->sizes.generic_param_token);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_GENERICPAR;
} else {
idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_TYPEDEF;
}
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_assembly (MonoAssembly *assembly)
{
guint32 idx;
if (assembly->image->dynamic)
return lookup_custom_attr (assembly->image, assembly);
idx = 1; /* there is only one assembly */
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_ASSEMBLY;
return mono_custom_attrs_from_index (assembly->image, idx);
}
static MonoCustomAttrInfo*
mono_custom_attrs_from_module (MonoImage *image)
{
guint32 idx;
if (image->dynamic)
return lookup_custom_attr (image, image);
idx = 1; /* there is only one module */
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_MODULE;
return mono_custom_attrs_from_index (image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_property (MonoClass *klass, MonoProperty *property)
{
guint32 idx;
if (klass->image->dynamic) {
property = mono_metadata_get_corresponding_property_from_generic_type_definition (property);
return lookup_custom_attr (klass->image, property);
}
idx = find_property_index (klass, property);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_PROPERTY;
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_event (MonoClass *klass, MonoEvent *event)
{
guint32 idx;
if (klass->image->dynamic) {
event = mono_metadata_get_corresponding_event_from_generic_type_definition (event);
return lookup_custom_attr (klass->image, event);
}
idx = find_event_index (klass, event);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_EVENT;
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_field (MonoClass *klass, MonoClassField *field)
{
guint32 idx;
if (klass->image->dynamic) {
field = mono_metadata_get_corresponding_field_from_generic_type_definition (field);
return lookup_custom_attr (klass->image, field);
}
idx = find_field_index (klass, field);
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_FIELDDEF;
return mono_custom_attrs_from_index (klass->image, idx);
}
MonoCustomAttrInfo*
mono_custom_attrs_from_param (MonoMethod *method, guint32 param)
{
MonoTableInfo *ca;
guint32 i, idx, method_index;
guint32 param_list, param_last, param_pos, found;
MonoImage *image;
MonoReflectionMethodAux *aux;
/*
* An instantiated method has the same cattrs as the generic method definition.
*
* LAMESPEC: The .NET SRE throws an exception for instantiations of generic method builders
* Note that this stanza is not necessary for non-SRE types, but it's a micro-optimization
*/
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
if (method->klass->image->dynamic) {
MonoCustomAttrInfo *res, *ainfo;
int size;
aux = g_hash_table_lookup (((MonoDynamicImage*)method->klass->image)->method_aux_hash, method);
if (!aux || !aux->param_cattr)
return NULL;
/* Need to copy since it will be freed later */
ainfo = aux->param_cattr [param];
if (!ainfo)
return NULL;
size = MONO_SIZEOF_CUSTOM_ATTR_INFO + sizeof (MonoCustomAttrEntry) * ainfo->num_attrs;
res = g_malloc0 (size);
memcpy (res, ainfo, size);
return res;
}
image = method->klass->image;
method_index = mono_method_get_index (method);
if (!method_index)
return NULL;
ca = &image->tables [MONO_TABLE_METHOD];
param_list = mono_metadata_decode_row_col (ca, method_index - 1, MONO_METHOD_PARAMLIST);
if (method_index == ca->rows) {
ca = &image->tables [MONO_TABLE_PARAM];
param_last = ca->rows + 1;
} else {
param_last = mono_metadata_decode_row_col (ca, method_index, MONO_METHOD_PARAMLIST);
ca = &image->tables [MONO_TABLE_PARAM];
}
found = FALSE;
for (i = param_list; i < param_last; ++i) {
param_pos = mono_metadata_decode_row_col (ca, i - 1, MONO_PARAM_SEQUENCE);
if (param_pos == param) {
found = TRUE;
break;
}
}
if (!found)
return NULL;
idx = i;
idx <<= MONO_CUSTOM_ATTR_BITS;
idx |= MONO_CUSTOM_ATTR_PARAMDEF;
return mono_custom_attrs_from_index (image, idx);
}
gboolean
mono_custom_attrs_has_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass)
{
int i;
MonoClass *klass;
for (i = 0; i < ainfo->num_attrs; ++i) {
klass = ainfo->attrs [i].ctor->klass;
if (mono_class_has_parent (klass, attr_klass) || (MONO_CLASS_IS_INTERFACE (attr_klass) && mono_class_is_assignable_from (attr_klass, klass)))
return TRUE;
}
return FALSE;
}
MonoObject*
mono_custom_attrs_get_attr (MonoCustomAttrInfo *ainfo, MonoClass *attr_klass)
{
int i, attr_index;
MonoClass *klass;
MonoArray *attrs;
attr_index = -1;
for (i = 0; i < ainfo->num_attrs; ++i) {
klass = ainfo->attrs [i].ctor->klass;
if (mono_class_has_parent (klass, attr_klass)) {
attr_index = i;
break;
}
}
if (attr_index == -1)
return NULL;
attrs = mono_custom_attrs_construct (ainfo);
if (attrs)
return mono_array_get (attrs, MonoObject*, attr_index);
else
return NULL;
}
/*
* mono_reflection_get_custom_attrs_info:
* @obj: a reflection object handle
*
* Return the custom attribute info for attributes defined for the
 * reflection handle @obj.
*
* FIXME this function leaks like a sieve for SRE objects.
*/
MonoCustomAttrInfo*
mono_reflection_get_custom_attrs_info (MonoObject *obj)
{
MonoClass *klass;
MonoCustomAttrInfo *cinfo = NULL;
klass = obj->vtable->klass;
if (klass == mono_defaults.monotype_class) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType *)obj);
klass = mono_class_from_mono_type (type);
/*We cannot mono_class_init the class from which we'll load the custom attributes since this must work with broken types.*/
cinfo = mono_custom_attrs_from_class (klass);
} else if (strcmp ("Assembly", klass->name) == 0 || strcmp ("MonoAssembly", klass->name) == 0) {
MonoReflectionAssembly *rassembly = (MonoReflectionAssembly*)obj;
cinfo = mono_custom_attrs_from_assembly (rassembly->assembly);
} else if (strcmp ("Module", klass->name) == 0 || strcmp ("MonoModule", klass->name) == 0) {
MonoReflectionModule *module = (MonoReflectionModule*)obj;
cinfo = mono_custom_attrs_from_module (module->image);
} else if (strcmp ("MonoProperty", klass->name) == 0) {
MonoReflectionProperty *rprop = (MonoReflectionProperty*)obj;
cinfo = mono_custom_attrs_from_property (rprop->property->parent, rprop->property);
} else if (strcmp ("MonoEvent", klass->name) == 0) {
MonoReflectionMonoEvent *revent = (MonoReflectionMonoEvent*)obj;
cinfo = mono_custom_attrs_from_event (revent->event->parent, revent->event);
} else if (strcmp ("MonoField", klass->name) == 0) {
MonoReflectionField *rfield = (MonoReflectionField*)obj;
cinfo = mono_custom_attrs_from_field (rfield->field->parent, rfield->field);
} else if ((strcmp ("MonoMethod", klass->name) == 0) || (strcmp ("MonoCMethod", klass->name) == 0)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj;
cinfo = mono_custom_attrs_from_method (rmethod->method);
} else if ((strcmp ("MonoGenericMethod", klass->name) == 0) || (strcmp ("MonoGenericCMethod", klass->name) == 0)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)obj;
cinfo = mono_custom_attrs_from_method (rmethod->method);
} else if (strcmp ("ParameterInfo", klass->name) == 0) {
MonoReflectionParameter *param = (MonoReflectionParameter*)obj;
MonoClass *member_class = mono_object_class (param->MemberImpl);
if (mono_class_is_reflection_method_or_constructor (member_class)) {
MonoReflectionMethod *rmethod = (MonoReflectionMethod*)param->MemberImpl;
cinfo = mono_custom_attrs_from_param (rmethod->method, param->PositionImpl + 1);
} else if (is_sr_mono_property (member_class)) {
MonoReflectionProperty *prop = (MonoReflectionProperty *)param->MemberImpl;
MonoMethod *method;
if (!(method = prop->property->get))
method = prop->property->set;
g_assert (method);
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
}
#ifndef DISABLE_REFLECTION_EMIT
else if (is_sre_method_on_tb_inst (member_class)) {/*XXX This is a workaround for Compiler Context*/
MonoMethod *method = mono_reflection_method_on_tb_inst_get_handle ((MonoReflectionMethodOnTypeBuilderInst*)param->MemberImpl);
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
		} else if (is_sre_ctor_on_tb_inst (member_class)) { /*XXX This is a workaround for Compiler Context*/
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)param->MemberImpl;
MonoMethod *method = NULL;
if (is_sre_ctor_builder (mono_object_class (c->cb)))
method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle;
else if (is_sr_mono_cmethod (mono_object_class (c->cb)))
method = ((MonoReflectionMethod *)c->cb)->method;
else
g_error ("mono_reflection_get_custom_attrs_info:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (member_class));
cinfo = mono_custom_attrs_from_param (method, param->PositionImpl + 1);
}
#endif
else {
char *type_name = mono_type_get_full_name (member_class);
char *msg = g_strdup_printf ("Custom attributes on a ParamInfo with member %s are not supported", type_name);
MonoException *ex = mono_get_exception_not_supported (msg);
g_free (type_name);
g_free (msg);
mono_raise_exception (ex);
}
} else if (strcmp ("AssemblyBuilder", klass->name) == 0) {
MonoReflectionAssemblyBuilder *assemblyb = (MonoReflectionAssemblyBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, assemblyb->assembly.assembly->image, assemblyb->cattrs);
} else if (strcmp ("TypeBuilder", klass->name) == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &tb->module->dynamic_image->image, tb->cattrs);
} else if (strcmp ("ModuleBuilder", klass->name) == 0) {
MonoReflectionModuleBuilder *mb = (MonoReflectionModuleBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &mb->dynamic_image->image, mb->cattrs);
} else if (strcmp ("ConstructorBuilder", klass->name) == 0) {
MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, cb->mhandle->klass->image, cb->cattrs);
} else if (strcmp ("MethodBuilder", klass->name) == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, mb->mhandle->klass->image, mb->cattrs);
} else if (strcmp ("FieldBuilder", klass->name) == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj;
cinfo = mono_custom_attrs_from_builders (NULL, &((MonoReflectionTypeBuilder*)fb->typeb)->module->dynamic_image->image, fb->cattrs);
} else if (strcmp ("MonoGenericClass", klass->name) == 0) {
MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)obj;
cinfo = mono_reflection_get_custom_attrs_info ((MonoObject*)gclass->generic_type);
} else { /* handle other types here... */
g_error ("get custom attrs not yet supported for %s", klass->name);
}
return cinfo;
}
/*
* mono_reflection_get_custom_attrs_by_type:
* @obj: a reflection object handle
*
 * Return an array with all the custom attributes defined on the
 * reflection handle @obj. If @attr_klass is non-NULL, only custom attributes
 * of that type are returned. The objects are fully built. Returns NULL if a
 * loading error occurs.
*/
MonoArray*
mono_reflection_get_custom_attrs_by_type (MonoObject *obj, MonoClass *attr_klass, MonoError *error)
{
MonoArray *result;
MonoCustomAttrInfo *cinfo;
mono_error_init (error);
cinfo = mono_reflection_get_custom_attrs_info (obj);
if (cinfo) {
result = mono_custom_attrs_construct_by_type (cinfo, attr_klass, error);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
} else {
if (mono_loader_get_last_error ())
return NULL;
result = mono_array_new_cached (mono_domain_get (), mono_defaults.attribute_class, 0);
}
return result;
}
/*
* mono_reflection_get_custom_attrs:
* @obj: a reflection object handle
*
 * Return an array with all the custom attributes defined on the
 * reflection handle @obj. The objects are fully built. Returns NULL if a
 * loading error occurs.
*/
MonoArray*
mono_reflection_get_custom_attrs (MonoObject *obj)
{
MonoError error;
return mono_reflection_get_custom_attrs_by_type (obj, NULL, &error);
}
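/*
 * Example (hypothetical caller): iterating the constructed attribute
 * instances returned for a reflection handle:
 *
 *	MonoArray *attrs = mono_reflection_get_custom_attrs (obj);
 *	if (attrs) {
 *		int i;
 *		for (i = 0; i < mono_array_length (attrs); ++i) {
 *			MonoObject *attr = mono_array_get (attrs, MonoObject*, i);
 *			... each element is a fully constructed attribute instance ...
 *		}
 *	}
 */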
/*
* mono_reflection_get_custom_attrs_data:
* @obj: a reflection obj handle
*
 * Returns an array of System.Reflection.CustomAttributeData objects, which
 * carry information about attributes reflected on types loaded using the
 * Reflection-Only methods.
*/
MonoArray*
mono_reflection_get_custom_attrs_data (MonoObject *obj)
{
MonoArray *result;
MonoCustomAttrInfo *cinfo;
cinfo = mono_reflection_get_custom_attrs_info (obj);
if (cinfo) {
result = mono_custom_attrs_data_construct (cinfo);
if (!cinfo->cached)
mono_custom_attrs_free (cinfo);
} else
result = mono_array_new (mono_domain_get (), mono_defaults.customattribute_data_class, 0);
return result;
}
static MonoReflectionType*
mono_reflection_type_get_underlying_system_type (MonoReflectionType* t)
{
static MonoMethod *method_get_underlying_system_type = NULL;
MonoMethod *usertype_method;
if (!method_get_underlying_system_type)
method_get_underlying_system_type = mono_class_get_method_from_name (mono_defaults.systemtype_class, "get_UnderlyingSystemType", 0);
usertype_method = mono_object_get_virtual_method ((MonoObject *) t, method_get_underlying_system_type);
return (MonoReflectionType *) mono_runtime_invoke (usertype_method, t, NULL, NULL);
}
static gboolean
is_corlib_type (MonoClass *class)
{
return class->image == mono_defaults.corlib;
}
#define check_corlib_type_cached(_class, _namespace, _name) do { \
static MonoClass *cached_class; \
if (cached_class) \
return cached_class == _class; \
if (is_corlib_type (_class) && !strcmp (_name, _class->name) && !strcmp (_namespace, _class->name_space)) { \
cached_class = _class; \
return TRUE; \
} \
return FALSE; \
} while (0)
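/*
 * Each is_sre_* / is_sr_mono_* predicate below expands the macro above into
 * a per-call-site static cache: the first successful lookup stores the
 * MonoClass, so subsequent calls reduce to a pointer comparison.
 */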
#ifndef DISABLE_REFLECTION_EMIT
static gboolean
is_sre_array (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ArrayType");
}
static gboolean
is_sre_byref (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ByRefType");
}
static gboolean
is_sre_pointer (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "PointerType");
}
static gboolean
is_sre_generic_instance (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericClass");
}
static gboolean
is_sre_type_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "TypeBuilder");
}
static gboolean
is_sre_method_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "MethodBuilder");
}
static gboolean
is_sre_ctor_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorBuilder");
}
static gboolean
is_sre_field_builder (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "FieldBuilder");
}
static gboolean
is_sre_method_on_tb_inst (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "MethodOnTypeBuilderInst");
}
static gboolean
is_sre_ctor_on_tb_inst (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection.Emit", "ConstructorOnTypeBuilderInst");
}
MonoType*
mono_reflection_type_get_handle (MonoReflectionType* ref)
{
MonoClass *class;
if (!ref)
return NULL;
if (ref->type)
return ref->type;
if (is_usertype (ref)) {
ref = mono_reflection_type_get_underlying_system_type (ref);
if (ref == NULL || is_usertype (ref))
return NULL;
if (ref->type)
return ref->type;
}
class = mono_object_class (ref);
if (is_sre_array (class)) {
MonoType *res;
MonoReflectionArrayType *sre_array = (MonoReflectionArrayType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_array->element_type);
g_assert (base);
		if (sre_array->rank == 0) // single-dimensional array
res = &mono_array_class_get (mono_class_from_mono_type (base), 1)->byval_arg;
else
res = &mono_bounded_array_class_get (mono_class_from_mono_type (base), sre_array->rank, TRUE)->byval_arg;
sre_array->type.type = res;
return res;
} else if (is_sre_byref (class)) {
MonoType *res;
MonoReflectionDerivedType *sre_byref = (MonoReflectionDerivedType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_byref->element_type);
g_assert (base);
res = &mono_class_from_mono_type (base)->this_arg;
sre_byref->type.type = res;
return res;
} else if (is_sre_pointer (class)) {
MonoType *res;
MonoReflectionDerivedType *sre_pointer = (MonoReflectionDerivedType*)ref;
MonoType *base = mono_reflection_type_get_handle (sre_pointer->element_type);
g_assert (base);
res = &mono_ptr_class_get (base)->byval_arg;
sre_pointer->type.type = res;
return res;
} else if (is_sre_generic_instance (class)) {
MonoType *res, **types;
MonoReflectionGenericClass *gclass = (MonoReflectionGenericClass*)ref;
int i, count;
count = mono_array_length (gclass->type_arguments);
types = g_new0 (MonoType*, count);
for (i = 0; i < count; ++i) {
MonoReflectionType *t = mono_array_get (gclass->type_arguments, gpointer, i);
types [i] = mono_reflection_type_get_handle (t);
if (!types[i]) {
g_free (types);
return NULL;
}
}
res = mono_reflection_bind_generic_parameters (gclass->generic_type, count, types);
g_free (types);
g_assert (res);
gclass->type.type = res;
return res;
}
g_error ("Cannot handle corlib user type %s", mono_type_full_name (&mono_object_class(ref)->byval_arg));
return NULL;
}
void
mono_reflection_create_unmanaged_type (MonoReflectionType *type)
{
mono_reflection_type_get_handle (type);
}
void
mono_reflection_register_with_runtime (MonoReflectionType *type)
{
MonoType *res = mono_reflection_type_get_handle (type);
MonoDomain *domain = mono_object_domain ((MonoObject*)type);
MonoClass *class;
if (!res)
mono_raise_exception (mono_get_exception_argument (NULL, "Invalid generic instantiation, one or more arguments are not proper user types"));
class = mono_class_from_mono_type (res);
mono_loader_lock (); /*same locking as mono_type_get_object*/
mono_domain_lock (domain);
if (!class->image->dynamic) {
mono_class_setup_supertypes (class);
} else {
if (!domain->type_hash)
domain->type_hash = mono_g_hash_table_new_type ((GHashFunc)mono_metadata_type_hash,
(GCompareFunc)mono_metadata_type_equal, MONO_HASH_VALUE_GC);
mono_g_hash_table_insert (domain->type_hash, res, type);
}
mono_domain_unlock (domain);
mono_loader_unlock ();
}
/**
* LOCKING: Assumes the loader lock is held.
*/
static MonoMethodSignature*
parameters_to_signature (MonoImage *image, MonoArray *parameters) {
MonoMethodSignature *sig;
int count, i;
count = parameters? mono_array_length (parameters): 0;
sig = image_g_malloc0 (image, MONO_SIZEOF_METHOD_SIGNATURE + sizeof (MonoType*) * count);
sig->param_count = count;
sig->sentinelpos = -1; /* FIXME */
for (i = 0; i < count; ++i)
sig->params [i] = mono_type_array_get_and_resolve (parameters, i);
return sig;
}
/**
* LOCKING: Assumes the loader lock is held.
*/
static MonoMethodSignature*
ctor_builder_to_signature (MonoImage *image, MonoReflectionCtorBuilder *ctor) {
MonoMethodSignature *sig;
sig = parameters_to_signature (image, ctor->parameters);
sig->hasthis = ctor->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = &mono_defaults.void_class->byval_arg;
return sig;
}
/**
* LOCKING: Assumes the loader lock is held.
*/
static MonoMethodSignature*
method_builder_to_signature (MonoImage *image, MonoReflectionMethodBuilder *method) {
MonoMethodSignature *sig;
sig = parameters_to_signature (image, method->parameters);
sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = method->rtype? mono_reflection_type_get_handle ((MonoReflectionType*)method->rtype): &mono_defaults.void_class->byval_arg;
sig->generic_param_count = method->generic_params ? mono_array_length (method->generic_params) : 0;
return sig;
}
static MonoMethodSignature*
dynamic_method_to_signature (MonoReflectionDynamicMethod *method) {
MonoMethodSignature *sig;
sig = parameters_to_signature (NULL, method->parameters);
sig->hasthis = method->attrs & METHOD_ATTRIBUTE_STATIC? 0: 1;
sig->ret = method->rtype? mono_reflection_type_get_handle (method->rtype): &mono_defaults.void_class->byval_arg;
sig->generic_param_count = 0;
return sig;
}
static void
get_prop_name_and_type (MonoObject *prop, char **name, MonoType **type)
{
MonoClass *klass = mono_object_class (prop);
if (strcmp (klass->name, "PropertyBuilder") == 0) {
MonoReflectionPropertyBuilder *pb = (MonoReflectionPropertyBuilder *)prop;
*name = mono_string_to_utf8 (pb->name);
*type = mono_reflection_type_get_handle ((MonoReflectionType*)pb->type);
} else {
MonoReflectionProperty *p = (MonoReflectionProperty *)prop;
*name = g_strdup (p->property->name);
if (p->property->get)
*type = mono_method_signature (p->property->get)->ret;
else
*type = mono_method_signature (p->property->set)->params [mono_method_signature (p->property->set)->param_count - 1];
}
}
static void
get_field_name_and_type (MonoObject *field, char **name, MonoType **type)
{
MonoClass *klass = mono_object_class (field);
if (strcmp (klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder *)field;
*name = mono_string_to_utf8 (fb->name);
*type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
} else {
MonoReflectionField *f = (MonoReflectionField *)field;
*name = g_strdup (mono_field_get_name (f->field));
*type = f->field->type;
}
}
#else /* DISABLE_REFLECTION_EMIT */
void
mono_reflection_register_with_runtime (MonoReflectionType *type)
{
/* This is empty */
}
static gboolean
is_sre_type_builder (MonoClass *class)
{
return FALSE;
}
static gboolean
is_sre_generic_instance (MonoClass *class)
{
return FALSE;
}
static void
init_type_builder_generics (MonoObject *type)
{
}
#endif /* !DISABLE_REFLECTION_EMIT */
static gboolean
is_sr_mono_field (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoField");
}
static gboolean
is_sr_mono_property (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoProperty");
}
static gboolean
is_sr_mono_method (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoMethod");
}
static gboolean
is_sr_mono_cmethod (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoCMethod");
}
static gboolean
is_sr_mono_generic_method (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericMethod");
}
static gboolean
is_sr_mono_generic_cmethod (MonoClass *class)
{
check_corlib_type_cached (class, "System.Reflection", "MonoGenericCMethod");
}
gboolean
mono_class_is_reflection_method_or_constructor (MonoClass *class)
{
return is_sr_mono_method (class) || is_sr_mono_cmethod (class) || is_sr_mono_generic_method (class) || is_sr_mono_generic_cmethod (class);
}
static gboolean
is_usertype (MonoReflectionType *ref)
{
MonoClass *class = mono_object_class (ref);
return class->image != mono_defaults.corlib || strcmp ("TypeDelegator", class->name) == 0;
}
static MonoReflectionType*
mono_reflection_type_resolve_user_types (MonoReflectionType *type)
{
if (!type || type->type)
return type;
if (is_usertype (type)) {
type = mono_reflection_type_get_underlying_system_type (type);
if (is_usertype (type))
mono_raise_exception (mono_get_exception_not_supported ("User defined subclasses of System.Type are not yet supported"));
}
return type;
}
/*
* Encode a value in a custom attribute stream of bytes.
* The value to encode is either supplied as an object in argument val
* (valuetypes are boxed), or as a pointer to the data in the
* argument argval.
* @type represents the type of the value
* @buffer is the start of the buffer
* @p is the current position in the buffer
* @buflen contains the size of the buffer and is used to return the new buffer size
* if it needs to be reallocated.
* @retbuffer and @retp return the start and the position of the buffer
*/
static void
encode_cattr_value (MonoAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, MonoObject *arg, char *argval)
{
MonoTypeEnum simple_type;
if ((p-buffer) + 10 >= *buflen) {
char *newbuf;
*buflen *= 2;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
if (!argval)
argval = ((char*)arg + sizeof (MonoObject));
simple_type = type->type;
handle_enum:
switch (simple_type) {
case MONO_TYPE_BOOLEAN:
case MONO_TYPE_U1:
case MONO_TYPE_I1:
*p++ = *argval;
break;
case MONO_TYPE_CHAR:
case MONO_TYPE_U2:
case MONO_TYPE_I2:
swap_with_size (p, argval, 2, 1);
p += 2;
break;
case MONO_TYPE_U4:
case MONO_TYPE_I4:
case MONO_TYPE_R4:
swap_with_size (p, argval, 4, 1);
p += 4;
break;
case MONO_TYPE_R8:
#if defined(ARM_FPU_FPA) && G_BYTE_ORDER == G_LITTLE_ENDIAN
p [0] = argval [4];
p [1] = argval [5];
p [2] = argval [6];
p [3] = argval [7];
p [4] = argval [0];
p [5] = argval [1];
p [6] = argval [2];
p [7] = argval [3];
#else
swap_with_size (p, argval, 8, 1);
#endif
p += 8;
break;
case MONO_TYPE_U8:
case MONO_TYPE_I8:
swap_with_size (p, argval, 8, 1);
p += 8;
break;
case MONO_TYPE_VALUETYPE:
if (type->data.klass->enumtype) {
simple_type = mono_class_enum_basetype (type->data.klass)->type;
goto handle_enum;
} else {
g_warning ("generic valutype %s not handled in custom attr value decoding", type->data.klass->name);
}
break;
case MONO_TYPE_STRING: {
char *str;
guint32 slen;
if (!arg) {
*p++ = 0xFF;
break;
}
str = mono_string_to_utf8 ((MonoString*)arg);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
break;
}
case MONO_TYPE_CLASS: {
char *str;
guint32 slen;
if (!arg) {
*p++ = 0xFF;
break;
}
handle_type:
str = type_get_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)arg), NULL);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
break;
}
case MONO_TYPE_SZARRAY: {
int len, i;
MonoClass *eclass, *arg_eclass;
if (!arg) {
*p++ = 0xff; *p++ = 0xff; *p++ = 0xff; *p++ = 0xff;
break;
}
len = mono_array_length ((MonoArray*)arg);
*p++ = len & 0xff;
*p++ = (len >> 8) & 0xff;
*p++ = (len >> 16) & 0xff;
*p++ = (len >> 24) & 0xff;
*retp = p;
*retbuffer = buffer;
eclass = type->data.klass;
arg_eclass = mono_object_class (arg)->element_class;
if (!eclass) {
/* Happens when we are called from the MONO_TYPE_OBJECT case below */
eclass = mono_defaults.object_class;
}
if (eclass == mono_defaults.object_class && arg_eclass->valuetype) {
char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
int elsize = mono_class_array_element_size (arg_eclass);
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &arg_eclass->byval_arg, NULL, elptr);
elptr += elsize;
}
} else if (eclass->valuetype && arg_eclass->valuetype) {
char *elptr = mono_array_addr ((MonoArray*)arg, char, 0);
int elsize = mono_class_array_element_size (eclass);
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, NULL, elptr);
elptr += elsize;
}
} else {
for (i = 0; i < len; ++i) {
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &eclass->byval_arg, mono_array_get ((MonoArray*)arg, MonoObject*, i), NULL);
}
}
break;
}
case MONO_TYPE_OBJECT: {
MonoClass *klass;
char *str;
guint32 slen;
/*
* The parameter type is 'object' but the type of the actual
* argument is not. So we have to add type information to the blob
* too. This is completely undocumented in the spec.
*/
if (arg == NULL) {
*p++ = MONO_TYPE_STRING; // It's the same hack MS uses
*p++ = 0xFF;
break;
}
klass = mono_object_class (arg);
if (mono_object_isinst (arg, mono_defaults.systemtype_class)) {
*p++ = 0x50;
goto handle_type;
} else if (klass->enumtype) {
*p++ = 0x55;
} else if (klass == mono_defaults.string_class) {
simple_type = MONO_TYPE_STRING;
*p++ = 0x0E;
goto handle_enum;
} else if (klass->rank == 1) {
*p++ = 0x1D;
if (klass->element_class->byval_arg.type == MONO_TYPE_OBJECT)
/* See Partition II, Appendix B3 */
*p++ = 0x51;
else
*p++ = klass->element_class->byval_arg.type;
encode_cattr_value (assembly, buffer, p, &buffer, &p, buflen, &klass->byval_arg, arg, NULL);
break;
} else if (klass->byval_arg.type >= MONO_TYPE_BOOLEAN && klass->byval_arg.type <= MONO_TYPE_R8) {
*p++ = simple_type = klass->byval_arg.type;
goto handle_enum;
} else {
g_error ("unhandled type in custom attr");
}
str = type_get_qualified_name (mono_class_get_type(klass), NULL);
slen = strlen (str);
if ((p-buffer) + 10 + slen >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += slen;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
simple_type = mono_class_enum_basetype (klass)->type;
goto handle_enum;
}
default:
g_error ("type 0x%02x not yet supported in custom attr encoder", simple_type);
}
*retp = p;
*retbuffer = buffer;
}
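/*
 * The string/type lengths above are written with mono_metadata_encode_value (),
 * which uses the ECMA-335 II.23.2 compressed unsigned integer format. A minimal
 * standalone sketch of that encoding (illustrative only, not used here):
 *
 *   static int
 *   encode_compressed_uint (guint32 v, guint8 *out)
 *   {
 *       if (v < 0x80) {                      // 1 byte:  0xxxxxxx
 *           out [0] = v;
 *           return 1;
 *       } else if (v < 0x4000) {             // 2 bytes: 10xxxxxx xxxxxxxx
 *           out [0] = 0x80 | (v >> 8);
 *           out [1] = v & 0xff;
 *           return 2;
 *       } else {                             // 4 bytes: 110xxxxx + 3 more bytes
 *           out [0] = 0xc0 | (v >> 24);
 *           out [1] = (v >> 16) & 0xff;
 *           out [2] = (v >> 8) & 0xff;
 *           out [3] = v & 0xff;
 *           return 4;
 *       }
 *   }
 */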
static void
encode_field_or_prop_type (MonoType *type, char *p, char **retp)
{
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
char *str = type_get_qualified_name (type, NULL);
int slen = strlen (str);
*p++ = 0x55;
/*
* This seems to be optional...
* *p++ = 0x80;
*/
mono_metadata_encode_value (slen, p, &p);
memcpy (p, str, slen);
p += slen;
g_free (str);
} else if (type->type == MONO_TYPE_OBJECT) {
*p++ = 0x51;
} else if (type->type == MONO_TYPE_CLASS) {
/* it should be a type: encode_cattr_value () has the check */
*p++ = 0x50;
} else {
mono_metadata_encode_value (type->type, p, &p);
if (type->type == MONO_TYPE_SZARRAY)
/* See the examples in Partition VI, Annex B */
encode_field_or_prop_type (&type->data.klass->byval_arg, p, &p);
}
*retp = p;
}
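/*
 * For reference, the magic bytes used by the two encoders above are the
 * SERIALIZATION_TYPE_* values from ECMA-335 II.23.3:
 *
 *   0x50  type           (a System.Type argument)
 *   0x51  tagged object  (a "boxed" value stored under an object-typed slot)
 *   0x53  field          (named-argument kind marker)
 *   0x54  property       (named-argument kind marker)
 *   0x55  enum           (followed by the enum's qualified name)
 *
 * plus the ordinary ELEMENT_TYPE_* values (0x0E string, 0x1D szarray, ...).
 */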
#ifndef DISABLE_REFLECTION_EMIT
static void
encode_named_val (MonoReflectionAssembly *assembly, char *buffer, char *p, char **retbuffer, char **retp, guint32 *buflen, MonoType *type, char *name, MonoObject *value)
{
int len;
/* Preallocate a large enough buffer */
if (type->type == MONO_TYPE_VALUETYPE && type->data.klass->enumtype) {
char *str = type_get_qualified_name (type, NULL);
len = strlen (str);
g_free (str);
} else if (type->type == MONO_TYPE_SZARRAY && type->data.klass->enumtype) {
char *str = type_get_qualified_name (&type->data.klass->byval_arg, NULL);
len = strlen (str);
g_free (str);
} else {
len = 0;
}
len += strlen (name);
if ((p-buffer) + 20 + len >= *buflen) {
char *newbuf;
*buflen *= 2;
*buflen += len;
newbuf = g_realloc (buffer, *buflen);
p = newbuf + (p-buffer);
buffer = newbuf;
}
encode_field_or_prop_type (type, p, &p);
len = strlen (name);
mono_metadata_encode_value (len, p, &p);
memcpy (p, name, len);
p += len;
encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, buflen, type, value, NULL);
*retp = p;
*retbuffer = buffer;
}
/*
* mono_reflection_get_custom_attrs_blob:
* @ctor: custom attribute constructor
* @ctorArgs: arguments of the constructor
* @properties:
* @propValues:
* @fields:
* @fieldValues:
*
* Creates the blob of data that needs to be saved in the metadata and that represents
* the custom attribute described by @ctor, @ctorArgs etc.
* Returns: a Byte array representing the blob of data.
*/
MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
MonoArray *result;
MonoMethodSignature *sig;
MonoObject *arg;
char *buffer, *p;
guint32 buflen, i;
MONO_ARCH_SAVE_REGS;
if (strcmp (ctor->vtable->klass->name, "MonoCMethod")) {
/* sig is freed later so allocate it on the heap */
sig = ctor_builder_to_signature (NULL, (MonoReflectionCtorBuilder*)ctor);
} else {
sig = mono_method_signature (((MonoReflectionMethod*)ctor)->method);
}
g_assert (mono_array_length (ctorArgs) == sig->param_count);
buflen = 256;
p = buffer = g_malloc (buflen);
/* write the prolog */
*p++ = 1;
*p++ = 0;
for (i = 0; i < sig->param_count; ++i) {
arg = mono_array_get (ctorArgs, MonoObject*, i);
encode_cattr_value (assembly->assembly, buffer, p, &buffer, &p, &buflen, sig->params [i], arg, NULL);
}
i = 0;
if (properties)
i += mono_array_length (properties);
if (fields)
i += mono_array_length (fields);
*p++ = i & 0xff;
*p++ = (i >> 8) & 0xff;
if (properties) {
MonoObject *prop;
for (i = 0; i < mono_array_length (properties); ++i) {
MonoType *ptype;
char *pname;
prop = mono_array_get (properties, gpointer, i);
get_prop_name_and_type (prop, &pname, &ptype);
*p++ = 0x54; /* PROPERTY signature */
encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ptype, pname, (MonoObject*)mono_array_get (propValues, gpointer, i));
g_free (pname);
}
}
if (fields) {
MonoObject *field;
for (i = 0; i < mono_array_length (fields); ++i) {
MonoType *ftype;
char *fname;
field = mono_array_get (fields, gpointer, i);
get_field_name_and_type (field, &fname, &ftype);
*p++ = 0x53; /* FIELD signature */
encode_named_val (assembly, buffer, p, &buffer, &p, &buflen, ftype, fname, (MonoObject*)mono_array_get (fieldValues, gpointer, i));
g_free (fname);
}
}
g_assert (p - buffer <= buflen);
buflen = p - buffer;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
p = mono_array_addr (result, char, 0);
memcpy (p, buffer, buflen);
g_free (buffer);
if (strcmp (ctor->vtable->klass->name, "MonoCMethod"))
g_free (sig);
return result;
}
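/*
 * Shape of the blob built above, per ECMA-335 II.23.3 (informal sketch):
 *
 *   01 00                prolog (the 16-bit value 0x0001, little endian)
 *   <fixed args>         one encoded value per constructor parameter
 *   nn nn                NumNamed, little-endian count of named arguments
 *   { 53|54 type name value }*   one entry per field (0x53) or property (0x54)
 *
 * e.g. a hypothetical [MyAttr (7)] with an "int x" field set to 1 encodes as
 * 01 00 | 07 00 00 00 | 01 00 | 53 08 01 'x' 01 00 00 00 (0x08 = ELEMENT_TYPE_I4).
 */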
/*
* mono_reflection_setup_internal_class:
* @tb: a TypeBuilder object
*
* Creates a MonoClass that represents the TypeBuilder.
* This is a trick that lets us simplify a lot of reflection code
* (and will allow us to support Build and Run assemblies more easily).
*/
void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
MonoError error;
MonoClass *klass, *parent;
MONO_ARCH_SAVE_REGS;
RESOLVE_TYPE (tb->parent);
mono_loader_lock ();
if (tb->parent) {
/* check so we can compile corlib correctly */
if (strcmp (mono_object_class (tb->parent)->name, "TypeBuilder") == 0) {
/* mono_class_setup_mono_type () guarantees type->data.klass is valid */
parent = mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent)->data.klass;
} else {
parent = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb->parent));
}
} else {
parent = NULL;
}
/* the type has already been created: we just have to change the parent */
if (tb->type.type) {
klass = mono_class_from_mono_type (tb->type.type);
klass->parent = NULL;
/* fool mono_class_setup_parent */
klass->supertypes = NULL;
mono_class_setup_parent (klass, parent);
mono_class_setup_mono_type (klass);
mono_loader_unlock ();
return;
}
klass = mono_image_alloc0 (&tb->module->dynamic_image->image, sizeof (MonoClass));
klass->image = &tb->module->dynamic_image->image;
klass->inited = 1; /* we lie to the runtime */
klass->name = mono_string_to_utf8_image (klass->image, tb->name, &error);
if (!mono_error_ok (&error))
goto failure;
klass->name_space = mono_string_to_utf8_image (klass->image, tb->nspace, &error);
if (!mono_error_ok (&error))
goto failure;
klass->type_token = MONO_TOKEN_TYPE_DEF | tb->table_idx;
klass->flags = tb->attrs;
mono_profiler_class_event (klass, MONO_PROFILE_START_LOAD);
klass->element_class = klass;
if (mono_class_get_ref_info (klass) == NULL) {
mono_class_set_ref_info (klass, tb);
/* Put into cache so mono_class_get () will find it.
Skip nested types as those should not be available in the global scope. */
if (!tb->nesting_type) {
mono_image_add_to_name_cache (klass->image, klass->name_space, klass->name, tb->table_idx);
} else {
klass->image->reflection_info_unregister_classes =
g_slist_prepend (klass->image->reflection_info_unregister_classes, klass);
}
} else {
g_assert (mono_class_get_ref_info (klass) == tb);
}
mono_g_hash_table_insert (tb->module->dynamic_image->tokens,
GUINT_TO_POINTER (MONO_TOKEN_TYPE_DEF | tb->table_idx), tb);
if (parent != NULL) {
mono_class_setup_parent (klass, parent);
} else if (strcmp (klass->name, "Object") == 0 && strcmp (klass->name_space, "System") == 0) {
const char *old_n = klass->name;
/* trick to get relative numbering right when compiling corlib */
klass->name = "BuildingObject";
mono_class_setup_parent (klass, mono_defaults.object_class);
klass->name = old_n;
}
if ((!strcmp (klass->name, "ValueType") && !strcmp (klass->name_space, "System")) ||
(!strcmp (klass->name, "Object") && !strcmp (klass->name_space, "System")) ||
(!strcmp (klass->name, "Enum") && !strcmp (klass->name_space, "System"))) {
klass->instance_size = sizeof (MonoObject);
klass->size_inited = 1;
mono_class_setup_vtable_general (klass, NULL, 0, NULL);
}
mono_class_setup_mono_type (klass);
mono_class_setup_supertypes (klass);
/*
* FIXME: handle interfaces.
*/
tb->type.type = &klass->byval_arg;
if (tb->nesting_type) {
g_assert (tb->nesting_type->type);
klass->nested_in = mono_class_from_mono_type (mono_reflection_type_get_handle (tb->nesting_type));
}
/*g_print ("setup %s as %s (%p)\n", klass->name, ((MonoObject*)tb)->vtable->klass->name, tb);*/
mono_profiler_class_loaded (klass, MONO_PROFILE_OK);
mono_loader_unlock ();
return;
failure:
mono_loader_unlock ();
mono_error_raise_exception (&error);
}
/*
* mono_reflection_setup_generic_class:
* @tb: a TypeBuilder object
*
* Setup the generic class before adding the first generic parameter.
*/
void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
}
/*
* mono_reflection_create_generic_class:
* @tb: a TypeBuilder object
*
* Creates the generic class after all generic parameters have been added.
*/
void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
MonoClass *klass;
int count, i;
MONO_ARCH_SAVE_REGS;
klass = mono_class_from_mono_type (tb->type.type);
count = tb->generic_params ? mono_array_length (tb->generic_params) : 0;
if (klass->generic_container || (count == 0))
return;
g_assert (tb->generic_container && (tb->generic_container->owner.klass == klass));
klass->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
klass->generic_container->owner.klass = klass;
klass->generic_container->type_argc = count;
klass->generic_container->type_params = mono_image_alloc0 (klass->image, sizeof (MonoGenericParamFull) * count);
klass->is_generic = 1;
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gparam = mono_array_get (tb->generic_params, gpointer, i);
MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gparam)->data.generic_param;
klass->generic_container->type_params [i] = *param;
/* Make sure we are a different type instance */
klass->generic_container->type_params [i].param.owner = klass->generic_container;
klass->generic_container->type_params [i].info.pklass = NULL;
klass->generic_container->type_params [i].info.flags = gparam->attrs;
g_assert (klass->generic_container->type_params [i].param.owner);
}
klass->generic_container->context.class_inst = mono_get_shared_generic_inst (klass->generic_container);
}
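/*
 * Note on the copy in the loop above: each MonoGenericParamFull coming from a
 * gparam reflection object may be owned by another container, so the struct is
 * copied by value and re-parented. Conceptually (sketch of the three steps):
 *
 *   copy = *param;                     // detach from the original owner
 *   copy.param.owner = new_container;  // re-parent onto this class
 *   copy.info.pklass = NULL;           // force the param class to be re-created
 *
 * with the variance/constraint flags refreshed from gparam->attrs.
 */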
/*
* mono_reflection_create_internal_class:
* @tb: a TypeBuilder object
*
* Actually create the MonoClass that is associated with the TypeBuilder.
*/
void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
MonoClass *klass;
MONO_ARCH_SAVE_REGS;
klass = mono_class_from_mono_type (tb->type.type);
mono_loader_lock ();
if (klass->enumtype && mono_class_enum_basetype (klass) == NULL) {
MonoReflectionFieldBuilder *fb;
MonoClass *ec;
MonoType *enum_basetype;
g_assert (tb->fields != NULL);
g_assert (mono_array_length (tb->fields) >= 1);
fb = mono_array_get (tb->fields, MonoReflectionFieldBuilder*, 0);
if (!mono_type_is_valid_enum_basetype (mono_reflection_type_get_handle ((MonoReflectionType*)fb->type))) {
mono_loader_unlock ();
return;
}
enum_basetype = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
klass->element_class = mono_class_from_mono_type (enum_basetype);
if (!klass->element_class)
klass->element_class = mono_class_from_mono_type (enum_basetype);
/*
* get the element_class from the current corlib.
*/
ec = default_class_from_mono_type (enum_basetype);
klass->instance_size = ec->instance_size;
klass->size_inited = 1;
/*
* this is almost safe to do with enums and it's needed to be able
* to create objects of the enum type (for use in SetConstant).
*/
/* FIXME: Does this mean enums can't have method overrides ? */
mono_class_setup_vtable_general (klass, NULL, 0, NULL);
}
mono_loader_unlock ();
}
static MonoMarshalSpec*
mono_marshal_spec_from_builder (MonoImage *image, MonoAssembly *assembly,
MonoReflectionMarshal *minfo)
{
MonoMarshalSpec *res;
res = image_g_new0 (image, MonoMarshalSpec, 1);
res->native = minfo->type;
switch (minfo->type) {
case MONO_NATIVE_LPARRAY:
res->data.array_data.elem_type = minfo->eltype;
if (minfo->has_size) {
res->data.array_data.param_num = minfo->param_num;
res->data.array_data.num_elem = minfo->count;
res->data.array_data.elem_mult = minfo->param_num == -1 ? 0 : 1;
}
else {
res->data.array_data.param_num = -1;
res->data.array_data.num_elem = -1;
res->data.array_data.elem_mult = -1;
}
break;
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
res->data.array_data.num_elem = minfo->count;
break;
case MONO_NATIVE_CUSTOM:
if (minfo->marshaltyperef)
res->data.custom_data.custom_name =
type_get_fully_qualified_name (mono_reflection_type_get_handle ((MonoReflectionType*)minfo->marshaltyperef));
if (minfo->mcookie)
res->data.custom_data.cookie = mono_string_to_utf8 (minfo->mcookie);
break;
default:
break;
}
return res;
}
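/*
 * Example of the LPARRAY mapping above (informal, with a hypothetical import):
 *
 *   [DllImport ("foo")]
 *   static extern void Bar ([MarshalAs (UnmanagedType.LPArray,
 *                                       SizeParamIndex = 1)] byte[] buf, int len);
 *
 * arrives here with minfo->has_size set, so param_num = 1 and elem_mult = 1,
 * i.e. the element count is read from parameter 1 at call time; without size
 * information all three array fields are set to -1 ("unknown").
 */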
#endif /* !DISABLE_REFLECTION_EMIT */
MonoReflectionMarshal*
mono_reflection_marshal_from_marshal_spec (MonoDomain *domain, MonoClass *klass,
MonoMarshalSpec *spec)
{
static MonoClass *System_Reflection_Emit_UnmanagedMarshalClass;
MonoReflectionMarshal *minfo;
MonoType *mtype;
if (!System_Reflection_Emit_UnmanagedMarshalClass) {
System_Reflection_Emit_UnmanagedMarshalClass = mono_class_from_name (
mono_defaults.corlib, "System.Reflection.Emit", "UnmanagedMarshal");
g_assert (System_Reflection_Emit_UnmanagedMarshalClass);
}
minfo = (MonoReflectionMarshal*)mono_object_new (domain, System_Reflection_Emit_UnmanagedMarshalClass);
minfo->type = spec->native;
switch (minfo->type) {
case MONO_NATIVE_LPARRAY:
minfo->eltype = spec->data.array_data.elem_type;
minfo->count = spec->data.array_data.num_elem;
minfo->param_num = spec->data.array_data.param_num;
break;
case MONO_NATIVE_BYVALTSTR:
case MONO_NATIVE_BYVALARRAY:
minfo->count = spec->data.array_data.num_elem;
break;
case MONO_NATIVE_CUSTOM:
if (spec->data.custom_data.custom_name) {
mtype = mono_reflection_type_from_name (spec->data.custom_data.custom_name, klass->image);
if (mtype)
MONO_OBJECT_SETREF (minfo, marshaltyperef, mono_type_get_object (domain, mtype));
MONO_OBJECT_SETREF (minfo, marshaltype, mono_string_new (domain, spec->data.custom_data.custom_name));
}
if (spec->data.custom_data.cookie)
MONO_OBJECT_SETREF (minfo, mcookie, mono_string_new (domain, spec->data.custom_data.cookie));
break;
default:
break;
}
return minfo;
}
#ifndef DISABLE_REFLECTION_EMIT
static MonoMethod*
reflection_methodbuilder_to_mono_method (MonoClass *klass,
ReflectionMethodBuilder *rmb,
MonoMethodSignature *sig)
{
MonoError error;
MonoMethod *m;
MonoMethodWrapper *wrapperm;
MonoMarshalSpec **specs;
MonoReflectionMethodAux *method_aux;
MonoImage *image;
gboolean dynamic;
int i;
mono_error_init (&error);
/*
* Methods created using a MethodBuilder should have their memory allocated
* inside the image mempool, while dynamic methods should have their memory
* malloc'd.
*/
dynamic = rmb->refs != NULL;
image = dynamic ? NULL : klass->image;
if (!dynamic)
g_assert (!klass->generic_class);
mono_loader_lock ();
if ((rmb->attrs & METHOD_ATTRIBUTE_PINVOKE_IMPL) ||
(rmb->iattrs & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL))
m = (MonoMethod *)image_g_new0 (image, MonoMethodPInvoke, 1);
else
m = (MonoMethod *)image_g_new0 (image, MonoMethodWrapper, 1);
wrapperm = (MonoMethodWrapper*)m;
m->dynamic = dynamic;
m->slot = -1;
m->flags = rmb->attrs;
m->iflags = rmb->iattrs;
m->name = mono_string_to_utf8_image (image, rmb->name, &error);
g_assert (mono_error_ok (&error));
m->klass = klass;
m->signature = sig;
m->sre_method = TRUE;
m->skip_visibility = rmb->skip_visibility;
if (rmb->table_idx)
m->token = MONO_TOKEN_METHOD_DEF | (*rmb->table_idx);
if (m->iflags & METHOD_IMPL_ATTRIBUTE_INTERNAL_CALL) {
if (klass == mono_defaults.string_class && !strcmp (m->name, ".ctor"))
m->string_ctor = 1;
m->signature->pinvoke = 1;
} else if (m->flags & METHOD_ATTRIBUTE_PINVOKE_IMPL) {
m->signature->pinvoke = 1;
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->dllentry = rmb->dllentry ? mono_string_to_utf8_image (image, rmb->dllentry, &error) : image_strdup (image, m->name);
g_assert (mono_error_ok (&error));
method_aux->dll = mono_string_to_utf8_image (image, rmb->dll, &error);
g_assert (mono_error_ok (&error));
((MonoMethodPInvoke*)m)->piflags = (rmb->native_cc << 8) | (rmb->charset ? (rmb->charset - 1) * 2 : 0) | rmb->extra_flags;
if (klass->image->dynamic)
g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);
mono_loader_unlock ();
return m;
} else if (!(m->flags & METHOD_ATTRIBUTE_ABSTRACT) &&
!(m->iflags & METHOD_IMPL_ATTRIBUTE_RUNTIME)) {
MonoMethodHeader *header;
guint32 code_size;
gint32 max_stack, i;
gint32 num_locals = 0;
gint32 num_clauses = 0;
guint8 *code;
if (rmb->ilgen) {
code = mono_array_addr (rmb->ilgen->code, guint8, 0);
code_size = rmb->ilgen->code_len;
max_stack = rmb->ilgen->max_stack;
num_locals = rmb->ilgen->locals ? mono_array_length (rmb->ilgen->locals) : 0;
if (rmb->ilgen->ex_handlers)
num_clauses = method_count_clauses (rmb->ilgen);
} else {
if (rmb->code) {
code = mono_array_addr (rmb->code, guint8, 0);
code_size = mono_array_length (rmb->code);
/* we probably need to run a verifier on the code... */
max_stack = 8;
}
else {
code = NULL;
code_size = 0;
max_stack = 8;
}
}
header = image_g_malloc0 (image, MONO_SIZEOF_METHOD_HEADER + num_locals * sizeof (MonoType*));
header->code_size = code_size;
header->code = image_g_malloc (image, code_size);
memcpy ((char*)header->code, code, code_size);
header->max_stack = max_stack;
header->init_locals = rmb->init_locals;
header->num_locals = num_locals;
for (i = 0; i < num_locals; ++i) {
MonoReflectionLocalBuilder *lb =
mono_array_get (rmb->ilgen->locals, MonoReflectionLocalBuilder*, i);
header->locals [i] = image_g_new0 (image, MonoType, 1);
memcpy (header->locals [i], mono_reflection_type_get_handle ((MonoReflectionType*)lb->type), MONO_SIZEOF_TYPE);
}
header->num_clauses = num_clauses;
if (num_clauses) {
header->clauses = method_encode_clauses (image, (MonoDynamicImage*)klass->image,
rmb->ilgen, num_clauses);
}
wrapperm->header = header;
}
if (rmb->generic_params) {
int count = mono_array_length (rmb->generic_params);
MonoGenericContainer *container = rmb->generic_container;
g_assert (container);
container->type_argc = count;
container->type_params = image_g_new0 (image, MonoGenericParamFull, count);
container->owner.method = m;
m->is_generic = TRUE;
mono_method_set_generic_container (m, container);
for (i = 0; i < count; i++) {
MonoReflectionGenericParam *gp =
mono_array_get (rmb->generic_params, MonoReflectionGenericParam*, i);
MonoGenericParamFull *param = (MonoGenericParamFull *) mono_reflection_type_get_handle ((MonoReflectionType*)gp)->data.generic_param;
container->type_params [i] = *param;
}
/*
* The method signature might have pointers to generic parameters that belong to other methods.
* This is a valid SRE case, but the resulting method signature must be encoded using the proper
* generic parameters.
*/
for (i = 0; i < m->signature->param_count; ++i) {
MonoType *t = m->signature->params [i];
if (t->type == MONO_TYPE_MVAR) {
MonoGenericParam *gparam = t->data.generic_param;
if (gparam->num < count) {
m->signature->params [i] = mono_metadata_type_dup (image, m->signature->params [i]);
m->signature->params [i]->data.generic_param = mono_generic_container_get_param (container, gparam->num);
}
}
}
if (klass->generic_container) {
container->parent = klass->generic_container;
container->context.class_inst = klass->generic_container->context.class_inst;
}
container->context.method_inst = mono_get_shared_generic_inst (container);
}
if (rmb->refs) {
MonoMethodWrapper *mw = (MonoMethodWrapper*)m;
int i;
void **data;
m->wrapper_type = MONO_WRAPPER_DYNAMIC_METHOD;
mw->method_data = data = image_g_new (image, gpointer, rmb->nrefs + 1);
data [0] = GUINT_TO_POINTER (rmb->nrefs);
for (i = 0; i < rmb->nrefs; ++i)
data [i + 1] = rmb->refs [i];
}
method_aux = NULL;
/* Parameter info */
if (rmb->pinfo) {
if (!method_aux)
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->param_names = image_g_new0 (image, char *, mono_method_signature (m)->param_count + 1);
for (i = 0; i <= m->signature->param_count; ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
if ((i > 0) && (pb->attrs)) {
/* Make a copy since it might point to a shared type structure */
m->signature->params [i - 1] = mono_metadata_type_dup (klass->image, m->signature->params [i - 1]);
m->signature->params [i - 1]->attrs = pb->attrs;
}
if (pb->attrs & PARAM_ATTRIBUTE_HAS_DEFAULT) {
MonoDynamicImage *assembly;
guint32 idx, def_type, len;
char *p;
const char *p2;
if (!method_aux->param_defaults) {
method_aux->param_defaults = image_g_new0 (image, guint8*, m->signature->param_count + 1);
method_aux->param_default_types = image_g_new0 (image, guint32, m->signature->param_count + 1);
}
assembly = (MonoDynamicImage*)klass->image;
idx = encode_constant (assembly, pb->def_value, &def_type);
/* Copy the data from the blob since it might get realloc-ed */
p = assembly->blob.data + idx;
len = mono_metadata_decode_blob_size (p, &p2);
len += p2 - p;
method_aux->param_defaults [i] = image_g_malloc (image, len);
method_aux->param_default_types [i] = def_type;
memcpy ((gpointer)method_aux->param_defaults [i], p, len);
}
if (pb->name) {
method_aux->param_names [i] = mono_string_to_utf8_image (image, pb->name, &error);
g_assert (mono_error_ok (&error));
}
if (pb->cattrs) {
if (!method_aux->param_cattr)
method_aux->param_cattr = image_g_new0 (image, MonoCustomAttrInfo*, m->signature->param_count + 1);
method_aux->param_cattr [i] = mono_custom_attrs_from_builders (image, klass->image, pb->cattrs);
}
}
}
}
/* Parameter marshalling */
specs = NULL;
if (rmb->pinfo)
for (i = 0; i < mono_array_length (rmb->pinfo); ++i) {
MonoReflectionParamBuilder *pb;
if ((pb = mono_array_get (rmb->pinfo, MonoReflectionParamBuilder*, i))) {
if (pb->marshal_info) {
if (specs == NULL)
specs = image_g_new0 (image, MonoMarshalSpec*, sig->param_count + 1);
specs [pb->position] =
mono_marshal_spec_from_builder (image, klass->image->assembly, pb->marshal_info);
}
}
}
if (specs != NULL) {
if (!method_aux)
method_aux = image_g_new0 (image, MonoReflectionMethodAux, 1);
method_aux->param_marshall = specs;
}
if (klass->image->dynamic && method_aux)
g_hash_table_insert (((MonoDynamicImage*)klass->image)->method_aux_hash, m, method_aux);
mono_loader_unlock ();
return m;
}
static MonoMethod*
ctorbuilder_to_mono_method (MonoClass *klass, MonoReflectionCtorBuilder* mb)
{
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
mono_loader_lock ();
sig = ctor_builder_to_signature (klass->image, mb);
mono_loader_unlock ();
reflection_methodbuilder_from_ctor_builder (&rmb, mb);
mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs);
/* If we are in a generic class, we might be called multiple times from inflate_method */
if (!((MonoDynamicImage*)klass->image)->save && !klass->generic_container) {
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
return mb->mhandle;
}
static MonoMethod*
methodbuilder_to_mono_method (MonoClass *klass, MonoReflectionMethodBuilder* mb)
{
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
mono_loader_lock ();
sig = method_builder_to_signature (klass->image, mb);
mono_loader_unlock ();
reflection_methodbuilder_from_method_builder (&rmb, mb);
mb->mhandle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
mono_save_custom_attrs (klass->image, mb->mhandle, mb->cattrs);
/* If we are in a generic class, we might be called multiple times from inflate_method */
if (!((MonoDynamicImage*)klass->image)->save && !klass->generic_container) {
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
return mb->mhandle;
}
static MonoClassField*
fieldbuilder_to_mono_class_field (MonoClass *klass, MonoReflectionFieldBuilder* fb)
{
MonoClassField *field;
MonoType *custom;
MonoError error;
field = g_new0 (MonoClassField, 1);
field->name = mono_string_to_utf8_image (klass->image, fb->name, &error);
g_assert (mono_error_ok (&error));
if (fb->attrs || fb->modreq || fb->modopt) {
field->type = mono_metadata_type_dup (NULL, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
field->type->attrs = fb->attrs;
g_assert (klass->image->dynamic);
custom = add_custom_modifiers ((MonoDynamicImage*)klass->image, field->type, fb->modreq, fb->modopt);
g_free (field->type);
field->type = mono_metadata_type_dup (klass->image, custom);
g_free (custom);
} else {
field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
}
if (fb->offset != -1)
field->offset = fb->offset;
field->parent = klass;
mono_save_custom_attrs (klass->image, field, fb->cattrs);
// FIXME: Can't store fb->def_value/RVA, is it needed for field_on_insts ?
return field;
}
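/*
 * Example of the modifier path above (informal): a C# volatile field such as
 *
 *   public volatile int counter;
 *
 * is emitted with a modreq of System.Runtime.CompilerServices.IsVolatile, so
 * fb->modreq is non-NULL and the field type is duplicated with the custom
 * modifiers attached instead of being shared with the plain int32 type.
 */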
#endif
MonoType*
mono_reflection_bind_generic_parameters (MonoReflectionType *type, int type_argc, MonoType **types)
{
MonoClass *klass;
MonoReflectionTypeBuilder *tb = NULL;
gboolean is_dynamic = FALSE;
MonoDomain *domain;
MonoClass *geninst;
mono_loader_lock ();
domain = mono_object_domain (type);
if (is_sre_type_builder (mono_object_class (type))) {
tb = (MonoReflectionTypeBuilder *) type;
is_dynamic = TRUE;
} else if (is_sre_generic_instance (mono_object_class (type))) {
MonoReflectionGenericClass *rgi = (MonoReflectionGenericClass *) type;
MonoReflectionType *gtd = rgi->generic_type;
if (is_sre_type_builder (mono_object_class (gtd))) {
tb = (MonoReflectionTypeBuilder *)gtd;
is_dynamic = TRUE;
}
}
/* FIXME: fix the CreateGenericParameters protocol to avoid the two-stage setup of TypeBuilders */
if (tb && tb->generic_container)
mono_reflection_create_generic_class (tb);
klass = mono_class_from_mono_type (mono_reflection_type_get_handle (type));
if (!klass->generic_container) {
mono_loader_unlock ();
return NULL;
}
if (klass->wastypebuilder) {
tb = (MonoReflectionTypeBuilder *) mono_class_get_ref_info (klass);
is_dynamic = TRUE;
}
mono_loader_unlock ();
geninst = mono_class_bind_generic_parameters (klass, type_argc, types, is_dynamic);
return &geninst->byval_arg;
}
MonoClass*
mono_class_bind_generic_parameters (MonoClass *klass, int type_argc, MonoType **types, gboolean is_dynamic)
{
MonoGenericClass *gclass;
MonoGenericInst *inst;
g_assert (klass->generic_container);
inst = mono_metadata_get_generic_inst (type_argc, types);
gclass = mono_metadata_lookup_generic_class (klass, inst, is_dynamic);
return mono_generic_class_get_class (gclass);
}
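/*
 * Minimal usage sketch (assuming the generic type definition is at hand):
 * binding List<T> against int32 amounts to
 *
 *   MonoType *args [1] = { &mono_defaults.int32_class->byval_arg };
 *   MonoClass *inst = mono_class_bind_generic_parameters (list_class, 1, args, FALSE);
 *
 * where list_class is the List`1 definition; the result is the MonoClass for
 * List<int>, created (or fetched from the cache) by the generic class lookup.
 */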
MonoReflectionMethod*
mono_reflection_bind_generic_method_parameters (MonoReflectionMethod *rmethod, MonoArray *types)
{
MonoClass *klass;
MonoMethod *method, *inflated;
MonoMethodInflated *imethod;
MonoGenericContext tmp_context;
MonoGenericInst *ginst;
MonoType **type_argv;
int count, i;
MONO_ARCH_SAVE_REGS;
/* FIXME: this should no longer happen */
if (!strcmp (rmethod->object.vtable->klass->name, "MethodBuilder")) {
#ifndef DISABLE_REFLECTION_EMIT
MonoReflectionMethodBuilder *mb = NULL;
MonoReflectionTypeBuilder *tb;
MonoClass *klass;
mb = (MonoReflectionMethodBuilder *) rmethod;
tb = (MonoReflectionTypeBuilder *) mb->type;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
method = methodbuilder_to_mono_method (klass, mb);
#else
g_assert_not_reached ();
method = NULL;
#endif
} else {
method = rmethod->method;
}
klass = method->klass;
if (method->is_inflated)
method = ((MonoMethodInflated *) method)->declaring;
count = mono_method_signature (method)->generic_param_count;
if (count != mono_array_length (types))
return NULL;
type_argv = g_new0 (MonoType *, count);
for (i = 0; i < count; i++) {
MonoReflectionType *garg = mono_array_get (types, gpointer, i);
type_argv [i] = mono_reflection_type_get_handle (garg);
}
ginst = mono_metadata_get_generic_inst (count, type_argv);
g_free (type_argv);
tmp_context.class_inst = klass->generic_class ? klass->generic_class->context.class_inst : NULL;
tmp_context.method_inst = ginst;
inflated = mono_class_inflate_generic_method (method, &tmp_context);
imethod = (MonoMethodInflated *) inflated;
/* FIXME: I think this is no longer necessary */
if (method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
/*
* This table maps metadata structures representing inflated methods/fields
* to the reflection objects representing their generic definitions.
*/
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, rmethod);
mono_loader_unlock ();
}
if (!mono_verifier_is_method_valid_generic_instantiation (inflated))
mono_raise_exception (mono_get_exception_argument ("typeArguments", "Invalid generic arguments"));
return mono_method_get_object (mono_object_domain (rmethod), inflated, NULL);
}
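/*
 * This is effectively the native side of MethodInfo.MakeGenericMethod (): the
 * managed caller passes the open method plus a Type[] of arguments, e.g.
 *
 *   typeof (Foo).GetMethod ("Bar").MakeGenericMethod (typeof (int));
 *
 * ends up here with types = { int }, and the inflated MonoMethod is wrapped
 * back into a reflection object. A count mismatch makes the icall return NULL.
 */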
#ifndef DISABLE_REFLECTION_EMIT
static MonoMethod *
inflate_mono_method (MonoClass *klass, MonoMethod *method, MonoObject *obj)
{
MonoMethodInflated *imethod;
MonoGenericContext *context;
int i;
/*
* With generic code sharing the klass might not be inflated.
* This can happen because classes inflated with their own
* type arguments are "normalized" to the uninflated class.
*/
if (!klass->generic_class)
return method;
context = mono_class_get_context (klass);
if (klass->method.count && klass->methods) {
/* Find the already created inflated method */
for (i = 0; i < klass->method.count; ++i) {
g_assert (klass->methods [i]->is_inflated);
if (((MonoMethodInflated*)klass->methods [i])->declaring == method)
break;
}
g_assert (i < klass->method.count);
imethod = (MonoMethodInflated*)klass->methods [i];
} else {
imethod = (MonoMethodInflated *) mono_class_inflate_generic_method_full (method, klass, context);
}
if (method->is_generic && method->klass->image->dynamic) {
MonoDynamicImage *image = (MonoDynamicImage*)method->klass->image;
mono_loader_lock ();
mono_g_hash_table_insert (image->generic_def_objects, imethod, obj);
mono_loader_unlock ();
}
return (MonoMethod *) imethod;
}
static MonoMethod *
inflate_method (MonoReflectionType *type, MonoObject *obj)
{
MonoMethod *method;
MonoClass *gklass;
MonoClass *type_class = mono_object_class (type);
if (is_sre_generic_instance (type_class)) {
MonoReflectionGenericClass *mgc = (MonoReflectionGenericClass*)type;
gklass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mgc->generic_type));
} else if (is_sre_type_builder (type_class)) {
gklass = mono_class_from_mono_type (mono_reflection_type_get_handle (type));
} else if (type->type) {
gklass = mono_class_from_mono_type (type->type);
gklass = mono_class_get_generic_type_definition (gklass);
} else {
g_error ("Can't handle type %s", mono_type_get_full_name (mono_object_class (type)));
}
if (!strcmp (obj->vtable->klass->name, "MethodBuilder"))
if (((MonoReflectionMethodBuilder*)obj)->mhandle)
method = ((MonoReflectionMethodBuilder*)obj)->mhandle;
else
method = methodbuilder_to_mono_method (gklass, (MonoReflectionMethodBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "ConstructorBuilder"))
method = ctorbuilder_to_mono_method (gklass, (MonoReflectionCtorBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "MonoMethod") || !strcmp (obj->vtable->klass->name, "MonoCMethod"))
method = ((MonoReflectionMethod *) obj)->method;
else {
method = NULL; /* prevent compiler warning */
g_error ("can't handle type %s", obj->vtable->klass->name);
}
return inflate_mono_method (mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)type)), method, obj);
}
/* TODO: avoid saving custom attrs for generic classes; it's enough to have them on the generic type definition. */
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods,
MonoArray *ctors, MonoArray *fields, MonoArray *properties,
MonoArray *events)
{
MonoGenericClass *gclass;
MonoDynamicGenericClass *dgclass;
MonoClass *klass, *gklass;
MonoType *gtype;
int i;
MONO_ARCH_SAVE_REGS;
gtype = mono_reflection_type_get_handle ((MonoReflectionType*)type);
klass = mono_class_from_mono_type (gtype);
g_assert (gtype->type == MONO_TYPE_GENERICINST);
gclass = gtype->data.generic_class;
if (!gclass->is_dynamic)
return;
dgclass = (MonoDynamicGenericClass *) gclass;
if (dgclass->initialized)
return;
gklass = gclass->container_class;
mono_class_init (gklass);
dgclass->count_methods = methods ? mono_array_length (methods) : 0;
dgclass->count_ctors = ctors ? mono_array_length (ctors) : 0;
dgclass->count_fields = fields ? mono_array_length (fields) : 0;
dgclass->methods = mono_image_set_new0 (gclass->owner, MonoMethod *, dgclass->count_methods);
dgclass->ctors = mono_image_set_new0 (gclass->owner, MonoMethod *, dgclass->count_ctors);
dgclass->fields = mono_image_set_new0 (gclass->owner, MonoClassField, dgclass->count_fields);
dgclass->field_objects = mono_image_set_new0 (gclass->owner, MonoObject*, dgclass->count_fields);
dgclass->field_generic_types = mono_image_set_new0 (gclass->owner, MonoType*, dgclass->count_fields);
for (i = 0; i < dgclass->count_methods; i++) {
MonoObject *obj = mono_array_get (methods, gpointer, i);
dgclass->methods [i] = inflate_method ((MonoReflectionType*)type, obj);
}
for (i = 0; i < dgclass->count_ctors; i++) {
MonoObject *obj = mono_array_get (ctors, gpointer, i);
dgclass->ctors [i] = inflate_method ((MonoReflectionType*)type, obj);
}
for (i = 0; i < dgclass->count_fields; i++) {
MonoObject *obj = mono_array_get (fields, gpointer, i);
MonoClassField *field, *inflated_field = NULL;
if (!strcmp (obj->vtable->klass->name, "FieldBuilder"))
inflated_field = field = fieldbuilder_to_mono_class_field (klass, (MonoReflectionFieldBuilder *) obj);
else if (!strcmp (obj->vtable->klass->name, "MonoField"))
field = ((MonoReflectionField *) obj)->field;
else {
field = NULL; /* prevent compiler warning */
g_assert_not_reached ();
}
dgclass->fields [i] = *field;
dgclass->fields [i].parent = klass;
dgclass->fields [i].type = mono_class_inflate_generic_type (
field->type, mono_generic_class_get_context ((MonoGenericClass *) dgclass));
dgclass->field_generic_types [i] = field->type;
MONO_GC_REGISTER_ROOT_IF_MOVING (dgclass->field_objects [i]);
dgclass->field_objects [i] = obj;
if (inflated_field) {
g_free (inflated_field);
} else {
dgclass->fields [i].name = mono_image_set_strdup (gclass->owner, dgclass->fields [i].name);
}
}
dgclass->initialized = TRUE;
}
void
mono_reflection_free_dynamic_generic_class (MonoGenericClass *gclass)
{
MonoDynamicGenericClass *dgclass;
int i;
g_assert (gclass->is_dynamic);
dgclass = (MonoDynamicGenericClass *)gclass;
for (i = 0; i < dgclass->count_fields; ++i) {
MonoClassField *field = dgclass->fields + i;
mono_metadata_free_type (field->type);
MONO_GC_UNREGISTER_ROOT_IF_MOVING (dgclass->field_objects [i]);
}
}
static void
fix_partial_generic_class (MonoClass *klass)
{
MonoClass *gklass = klass->generic_class->container_class;
MonoDynamicGenericClass *dgclass;
int i;
if (klass->wastypebuilder)
return;
dgclass = (MonoDynamicGenericClass *) klass->generic_class;
if (klass->parent != gklass->parent) {
MonoError error;
MonoType *parent_type = mono_class_inflate_generic_type_checked (&gklass->parent->byval_arg, &klass->generic_class->context, &error);
if (mono_error_ok (&error)) {
MonoClass *parent = mono_class_from_mono_type (parent_type);
mono_metadata_free_type (parent_type);
if (parent != klass->parent) {
/*fool mono_class_setup_parent*/
klass->supertypes = NULL;
mono_class_setup_parent (klass, parent);
}
} else {
mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
mono_error_cleanup (&error);
if (gklass->wastypebuilder)
klass->wastypebuilder = TRUE;
return;
}
}
if (!dgclass->initialized)
return;
if (klass->method.count != gklass->method.count) {
klass->method.count = gklass->method.count;
klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * (klass->method.count + 1));
for (i = 0; i < klass->method.count; i++) {
klass->methods [i] = mono_class_inflate_generic_method_full (
gklass->methods [i], klass, mono_class_get_context (klass));
}
}
if (klass->interface_count && klass->interface_count != gklass->interface_count) {
klass->interface_count = gklass->interface_count;
klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * gklass->interface_count);
klass->interfaces_packed = NULL; /*make setup_interface_offsets happy*/
for (i = 0; i < gklass->interface_count; ++i) {
MonoType *iface_type = mono_class_inflate_generic_type (&gklass->interfaces [i]->byval_arg, mono_class_get_context (klass));
klass->interfaces [i] = mono_class_from_mono_type (iface_type);
mono_metadata_free_type (iface_type);
ensure_runtime_vtable (klass->interfaces [i]);
}
klass->interfaces_inited = 1;
}
if (klass->field.count != gklass->field.count) {
klass->field.count = gklass->field.count;
klass->fields = image_g_new0 (klass->image, MonoClassField, klass->field.count);
for (i = 0; i < klass->field.count; i++) {
klass->fields [i] = gklass->fields [i];
klass->fields [i].parent = klass;
klass->fields [i].type = mono_class_inflate_generic_type (gklass->fields [i].type, mono_class_get_context (klass));
}
}
/* We can only finish with this klass once its parent has been finished as well */
if (gklass->wastypebuilder)
klass->wastypebuilder = TRUE;
return;
}
static void
ensure_generic_class_runtime_vtable (MonoClass *klass)
{
MonoClass *gklass = klass->generic_class->container_class;
ensure_runtime_vtable (gklass);
fix_partial_generic_class (klass);
}
static void
ensure_runtime_vtable (MonoClass *klass)
{
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
int i, num, j;
if (!klass->image->dynamic || (!tb && !klass->generic_class) || klass->wastypebuilder)
return;
if (klass->parent)
ensure_runtime_vtable (klass->parent);
if (tb) {
num = tb->ctors? mono_array_length (tb->ctors): 0;
num += tb->num_methods;
klass->method.count = num;
klass->methods = mono_image_alloc (klass->image, sizeof (MonoMethod*) * num);
num = tb->ctors? mono_array_length (tb->ctors): 0;
for (i = 0; i < num; ++i)
klass->methods [i] = ctorbuilder_to_mono_method (klass, mono_array_get (tb->ctors, MonoReflectionCtorBuilder*, i));
num = tb->num_methods;
j = i;
for (i = 0; i < num; ++i)
klass->methods [j++] = methodbuilder_to_mono_method (klass, mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i));
if (tb->interfaces) {
klass->interface_count = mono_array_length (tb->interfaces);
klass->interfaces = mono_image_alloc (klass->image, sizeof (MonoClass*) * klass->interface_count);
for (i = 0; i < klass->interface_count; ++i) {
MonoType *iface = mono_type_array_get_and_resolve (tb->interfaces, i);
klass->interfaces [i] = mono_class_from_mono_type (iface);
ensure_runtime_vtable (klass->interfaces [i]);
}
klass->interfaces_inited = 1;
}
} else if (klass->generic_class){
ensure_generic_class_runtime_vtable (klass);
}
if (klass->flags & TYPE_ATTRIBUTE_INTERFACE) {
int slot_num = 0;
for (i = 0; i < klass->method.count; ++i) {
MonoMethod *im = klass->methods [i];
if (!(im->flags & METHOD_ATTRIBUTE_STATIC))
im->slot = slot_num++;
}
klass->interfaces_packed = NULL; /*make setup_interface_offsets happy*/
mono_class_setup_interface_offsets (klass);
mono_class_setup_interface_id (klass);
}
/*
* The generic vtable is needed even if image->run is not set since some
* runtime code like ves_icall_Type_GetMethodsByName depends on
* method->slot being defined.
*/
/*
* tb->methods cannot be freed since it is used to determine
* overrides during dynamic vtable construction.
*/
}
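/*
 * Layout produced above for a TypeBuilder-backed class (sketch):
 *
 *   klass->methods = [ ctor_0 ... ctor_{c-1} | method_0 ... method_{m-1} ]
 *
 * i.e. constructors first, then ordinary methods, with method.count = c + m.
 * For interfaces, non-static entries are then assigned slots 0..n in that order.
 */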
static MonoMethod*
mono_reflection_method_get_handle (MonoObject *method)
{
MonoClass *class = mono_object_class (method);
if (is_sr_mono_method (class) || is_sr_mono_generic_method (class)) {
MonoReflectionMethod *sr_method = (MonoReflectionMethod*)method;
return sr_method->method;
}
if (is_sre_method_builder (class)) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)method;
return mb->mhandle;
}
if (is_sre_method_on_tb_inst (class)) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)method;
MonoMethod *result;
/*FIXME move this to a proper method and unify with resolve_object*/
if (m->method_args) {
result = mono_reflection_method_on_tb_inst_get_handle (m);
} else {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)m->inst);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
MonoMethod *mono_method;
if (is_sre_method_builder (mono_object_class (m->mb)))
mono_method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle;
else if (is_sr_mono_method (mono_object_class (m->mb)))
mono_method = ((MonoReflectionMethod *)m->mb)->method;
else
g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb)));
result = inflate_mono_method (inflated_klass, mono_method, (MonoObject*)m->mb);
}
return result;
}
g_error ("Can't handle methods of type %s:%s", class->name_space, class->name);
return NULL;
}
void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
MonoReflectionTypeBuilder *tb;
int i, onum;
*overrides = NULL;
*num_overrides = 0;
g_assert (klass->image->dynamic);
if (!mono_class_get_ref_info (klass))
return;
g_assert (strcmp (((MonoObject*)mono_class_get_ref_info (klass))->vtable->klass->name, "TypeBuilder") == 0);
tb = (MonoReflectionTypeBuilder*)mono_class_get_ref_info (klass);
onum = 0;
if (tb->methods) {
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder *mb =
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
if (mb->override_method)
onum ++;
}
}
if (onum) {
*overrides = g_new0 (MonoMethod*, onum * 2);
onum = 0;
for (i = 0; i < tb->num_methods; ++i) {
MonoReflectionMethodBuilder *mb =
mono_array_get (tb->methods, MonoReflectionMethodBuilder*, i);
if (mb->override_method) {
(*overrides) [onum * 2] = mono_reflection_method_get_handle ((MonoObject *)mb->override_method);
(*overrides) [onum * 2 + 1] = mb->mhandle;
g_assert (mb->mhandle);
onum ++;
}
}
}
*num_overrides = onum;
}
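/*
 * The overrides array built above is a flat list of (declaration, body) pairs:
 *
 *   (*overrides) [2k]     = the method being overridden (base/interface decl)
 *   (*overrides) [2k + 1] = the TypeBuilder method that implements it
 *
 * which is the format consumed when the dynamic vtable is built, with
 * *num_overrides giving the number of pairs.
 */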
static void
typebuilder_setup_fields (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
MonoReflectionFieldBuilder *fb;
MonoClassField *field;
MonoImage *image = klass->image;
const char *p, *p2;
int i;
guint32 len, idx, real_size = 0;
klass->field.count = tb->num_fields;
klass->field.first = 0;
mono_error_init (error);
if (tb->class_size) {
g_assert ((tb->packing_size & 0xfffffff0) == 0);
klass->packing_size = tb->packing_size;
real_size = klass->instance_size + tb->class_size;
}
if (!klass->field.count) {
klass->instance_size = MAX (klass->instance_size, real_size);
return;
}
klass->fields = image_g_new0 (image, MonoClassField, klass->field.count);
mono_class_alloc_ext (klass);
klass->ext->field_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->field.count);
/*
This is, guess what, a hack.
The issue is that the runtime doesn't know how to set up the fields of a typebuilder and crashes.
On the static path no field class is resolved, only types are built. This is the right thing to do
but we suck.
Setting size_inited is harmless because we're doing the same job as mono_class_setup_fields anyway.
*/
klass->size_inited = 1;
for (i = 0; i < klass->field.count; ++i) {
fb = mono_array_get (tb->fields, gpointer, i);
field = &klass->fields [i];
field->name = mono_string_to_utf8_image (image, fb->name, error);
if (!mono_error_ok (error))
return;
if (fb->attrs) {
field->type = mono_metadata_type_dup (klass->image, mono_reflection_type_get_handle ((MonoReflectionType*)fb->type));
field->type->attrs = fb->attrs;
} else {
field->type = mono_reflection_type_get_handle ((MonoReflectionType*)fb->type);
}
if ((fb->attrs & FIELD_ATTRIBUTE_HAS_FIELD_RVA) && fb->rva_data)
klass->ext->field_def_values [i].data = mono_array_addr (fb->rva_data, char, 0);
if (fb->offset != -1)
field->offset = fb->offset;
field->parent = klass;
fb->handle = field;
mono_save_custom_attrs (klass->image, field, fb->cattrs);
if (klass->enumtype && !(field->type->attrs & FIELD_ATTRIBUTE_STATIC)) {
klass->cast_class = klass->element_class = mono_class_from_mono_type (field->type);
}
if (fb->def_value) {
MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image;
field->type->attrs |= FIELD_ATTRIBUTE_HAS_DEFAULT;
idx = encode_constant (assembly, fb->def_value, &klass->ext->field_def_values [i].def_type);
/* Copy the data from the blob since it might get realloc-ed */
p = assembly->blob.data + idx;
len = mono_metadata_decode_blob_size (p, &p2);
len += p2 - p;
klass->ext->field_def_values [i].data = mono_image_alloc (image, len);
memcpy ((gpointer)klass->ext->field_def_values [i].data, p, len);
}
}
klass->instance_size = MAX (klass->instance_size, real_size);
mono_class_layout_fields (klass);
}
static void
typebuilder_setup_properties (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
MonoReflectionPropertyBuilder *pb;
MonoImage *image = klass->image;
MonoProperty *properties;
int i;
mono_error_init (error);
if (!klass->ext)
klass->ext = image_g_new0 (image, MonoClassExt, 1);
klass->ext->property.count = tb->properties ? mono_array_length (tb->properties) : 0;
klass->ext->property.first = 0;
properties = image_g_new0 (image, MonoProperty, klass->ext->property.count);
klass->ext->properties = properties;
for (i = 0; i < klass->ext->property.count; ++i) {
pb = mono_array_get (tb->properties, MonoReflectionPropertyBuilder*, i);
properties [i].parent = klass;
properties [i].attrs = pb->attrs;
properties [i].name = mono_string_to_utf8_image (image, pb->name, error);
if (!mono_error_ok (error))
return;
if (pb->get_method)
properties [i].get = pb->get_method->mhandle;
if (pb->set_method)
properties [i].set = pb->set_method->mhandle;
mono_save_custom_attrs (klass->image, &properties [i], pb->cattrs);
if (pb->def_value) {
guint32 len, idx;
const char *p, *p2;
MonoDynamicImage *assembly = (MonoDynamicImage*)klass->image;
if (!klass->ext->prop_def_values)
klass->ext->prop_def_values = image_g_new0 (image, MonoFieldDefaultValue, klass->ext->property.count);
properties [i].attrs |= PROPERTY_ATTRIBUTE_HAS_DEFAULT;
idx = encode_constant (assembly, pb->def_value, &klass->ext->prop_def_values [i].def_type);
/* Copy the data from the blob since it might get realloc-ed */
p = assembly->blob.data + idx;
len = mono_metadata_decode_blob_size (p, &p2);
len += p2 - p;
klass->ext->prop_def_values [i].data = mono_image_alloc (image, len);
memcpy ((gpointer)klass->ext->prop_def_values [i].data, p, len);
}
}
}
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
MonoEvent *event = g_new0 (MonoEvent, 1);
MonoClass *klass;
klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
event->parent = klass;
event->attrs = eb->attrs;
event->name = mono_string_to_utf8 (eb->name);
if (eb->add_method)
event->add = eb->add_method->mhandle;
if (eb->remove_method)
event->remove = eb->remove_method->mhandle;
if (eb->raise_method)
event->raise = eb->raise_method->mhandle;
#ifndef MONO_SMALL_CONFIG
if (eb->other_methods) {
int j;
event->other = g_new0 (MonoMethod*, mono_array_length (eb->other_methods) + 1);
for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
MonoReflectionMethodBuilder *mb =
mono_array_get (eb->other_methods,
MonoReflectionMethodBuilder*, j);
event->other [j] = mb->mhandle;
}
}
#endif
return mono_event_get_object (mono_object_domain (tb), klass, event);
}
static void
typebuilder_setup_events (MonoClass *klass, MonoError *error)
{
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
MonoReflectionEventBuilder *eb;
MonoImage *image = klass->image;
MonoEvent *events;
int i;
mono_error_init (error);
if (!klass->ext)
klass->ext = image_g_new0 (image, MonoClassExt, 1);
klass->ext->event.count = tb->events ? mono_array_length (tb->events) : 0;
klass->ext->event.first = 0;
events = image_g_new0 (image, MonoEvent, klass->ext->event.count);
klass->ext->events = events;
for (i = 0; i < klass->ext->event.count; ++i) {
eb = mono_array_get (tb->events, MonoReflectionEventBuilder*, i);
events [i].parent = klass;
events [i].attrs = eb->attrs;
events [i].name = mono_string_to_utf8_image (image, eb->name, error);
if (!mono_error_ok (error))
return;
if (eb->add_method)
events [i].add = eb->add_method->mhandle;
if (eb->remove_method)
events [i].remove = eb->remove_method->mhandle;
if (eb->raise_method)
events [i].raise = eb->raise_method->mhandle;
#ifndef MONO_SMALL_CONFIG
if (eb->other_methods) {
int j;
events [i].other = image_g_new0 (image, MonoMethod*, mono_array_length (eb->other_methods) + 1);
for (j = 0; j < mono_array_length (eb->other_methods); ++j) {
MonoReflectionMethodBuilder *mb =
mono_array_get (eb->other_methods,
MonoReflectionMethodBuilder*, j);
events [i].other [j] = mb->mhandle;
}
}
#endif
mono_save_custom_attrs (klass->image, &events [i], eb->cattrs);
}
}
static gboolean
remove_instantiations_of_and_ensure_contents (gpointer key,
gpointer value,
gpointer user_data)
{
MonoType *type = (MonoType*)key;
MonoClass *klass = (MonoClass*)user_data;
if ((type->type == MONO_TYPE_GENERICINST) && (type->data.generic_class->container_class == klass)) {
fix_partial_generic_class (mono_class_from_mono_type (type)); // Ensure it's safe to use.
return TRUE;
} else
return FALSE;
}
static void
check_array_for_usertypes (MonoArray *arr)
{
int i;
if (!arr)
return;
for (i = 0; i < mono_array_length (arr); ++i)
RESOLVE_ARRAY_TYPE_ELEMENT (arr, i);
}
MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
MonoError error;
MonoClass *klass;
MonoDomain* domain;
MonoReflectionType* res;
int i, j;
MONO_ARCH_SAVE_REGS;
domain = mono_object_domain (tb);
klass = mono_class_from_mono_type (tb->type.type);
/*
* Check for user defined Type subclasses.
*/
RESOLVE_TYPE (tb->parent);
check_array_for_usertypes (tb->interfaces);
if (tb->fields) {
for (i = 0; i < mono_array_length (tb->fields); ++i) {
MonoReflectionFieldBuilder *fb = mono_array_get (tb->fields, gpointer, i);
if (fb) {
RESOLVE_TYPE (fb->type);
check_array_for_usertypes (fb->modreq);
check_array_for_usertypes (fb->modopt);
if (fb->marshal_info && fb->marshal_info->marshaltyperef)
RESOLVE_TYPE (fb->marshal_info->marshaltyperef);
}
}
}
if (tb->methods) {
for (i = 0; i < mono_array_length (tb->methods); ++i) {
MonoReflectionMethodBuilder *mb = mono_array_get (tb->methods, gpointer, i);
if (mb) {
RESOLVE_TYPE (mb->rtype);
check_array_for_usertypes (mb->return_modreq);
check_array_for_usertypes (mb->return_modopt);
check_array_for_usertypes (mb->parameters);
if (mb->param_modreq)
for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
if (mb->param_modopt)
for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
}
}
}
if (tb->ctors) {
for (i = 0; i < mono_array_length (tb->ctors); ++i) {
MonoReflectionCtorBuilder *mb = mono_array_get (tb->ctors, gpointer, i);
if (mb) {
check_array_for_usertypes (mb->parameters);
if (mb->param_modreq)
for (j = 0; j < mono_array_length (mb->param_modreq); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modreq, MonoArray*, j));
if (mb->param_modopt)
for (j = 0; j < mono_array_length (mb->param_modopt); ++j)
check_array_for_usertypes (mono_array_get (mb->param_modopt, MonoArray*, j));
}
}
}
mono_save_custom_attrs (klass->image, klass, tb->cattrs);
/*
* We need to lock the domain here because locks will also be taken inside
* the calls below, so we must keep the locking order correct.
*/
mono_loader_lock ();
mono_domain_lock (domain);
if (klass->wastypebuilder) {
mono_domain_unlock (domain);
mono_loader_unlock ();
return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
}
/*
* Fields to set in klass:
* the various flags: delegate/unicode/contextbound etc.
*/
klass->flags = tb->attrs;
klass->has_cctor = 1;
klass->has_finalize = 1;
klass->has_finalize_inited = 1;
/* fool mono_class_setup_parent */
klass->supertypes = NULL;
mono_class_setup_parent (klass, klass->parent);
mono_class_setup_mono_type (klass);
#if 0
if (!((MonoDynamicImage*)klass->image)->run) {
if (klass->generic_container) {
/* FIXME: The code below can't handle generic classes */
klass->wastypebuilder = TRUE;
mono_loader_unlock ();
mono_domain_unlock (domain);
return mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
}
}
#endif
/* enums are done right away */
if (!klass->enumtype)
ensure_runtime_vtable (klass);
if (tb->subtypes) {
for (i = 0; i < mono_array_length (tb->subtypes); ++i) {
MonoReflectionTypeBuilder *subtb = mono_array_get (tb->subtypes, MonoReflectionTypeBuilder*, i);
mono_class_alloc_ext (klass);
klass->ext->nested_classes = g_list_prepend_image (klass->image, klass->ext->nested_classes, mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)subtb)));
}
}
klass->nested_classes_inited = TRUE;
/* fields and object layout */
if (klass->parent) {
if (!klass->parent->size_inited)
mono_class_init (klass->parent);
klass->instance_size = klass->parent->instance_size;
klass->sizes.class_size = 0;
klass->min_align = klass->parent->min_align;
/* if the type has no fields we won't call the field_setup
* routine which sets up klass->has_references.
*/
klass->has_references |= klass->parent->has_references;
} else {
klass->instance_size = sizeof (MonoObject);
klass->min_align = 1;
}
/* FIXME: handle packing_size and instance_size */
typebuilder_setup_fields (klass, &error);
if (!mono_error_ok (&error))
goto failure;
typebuilder_setup_properties (klass, &error);
if (!mono_error_ok (&error))
goto failure;
typebuilder_setup_events (klass, &error);
if (!mono_error_ok (&error))
goto failure;
klass->wastypebuilder = TRUE;
/*
* If we are a generic TypeBuilder, there might be instantiations in the type cache
* which have type System.Reflection.MonoGenericClass, but after the type is created,
* we want to return normal System.MonoType objects, so clear these out from the cache.
*
* Together with this we must ensure the contents of all instances match the created type.
*/
if (domain->type_hash && klass->generic_container)
mono_g_hash_table_foreach_remove (domain->type_hash, remove_instantiations_of_and_ensure_contents, klass);
mono_domain_unlock (domain);
mono_loader_unlock ();
if (klass->enumtype && !mono_class_is_valid_enum (klass)) {
mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
mono_raise_exception (mono_get_exception_type_load (tb->name, NULL));
}
res = mono_type_get_object (mono_object_domain (tb), &klass->byval_arg);
g_assert (res != (MonoReflectionType*)tb);
return res;
failure:
mono_class_set_failure (klass, MONO_EXCEPTION_TYPE_LOAD, NULL);
klass->wastypebuilder = TRUE;
mono_domain_unlock (domain);
mono_loader_unlock ();
mono_error_raise_exception (&error);
return NULL;
}
void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
MonoGenericParamFull *param;
MonoImage *image;
MonoClass *pklass;
MonoError error;
MONO_ARCH_SAVE_REGS;
image = &gparam->tbuilder->module->dynamic_image->image;
param = mono_image_new0 (image, MonoGenericParamFull, 1);
param->info.name = mono_string_to_utf8_image (image, gparam->name, &error);
g_assert (mono_error_ok (&error));
param->param.num = gparam->index;
if (gparam->mbuilder) {
if (!gparam->mbuilder->generic_container) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder *)gparam->mbuilder->type;
MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)tb));
gparam->mbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
gparam->mbuilder->generic_container->is_method = TRUE;
/*
* Cannot set owner.method, since the MonoMethod is not created yet.
* Set the image field instead, so type_in_image () works.
*/
gparam->mbuilder->generic_container->image = klass->image;
}
param->param.owner = gparam->mbuilder->generic_container;
} else if (gparam->tbuilder) {
if (!gparam->tbuilder->generic_container) {
MonoClass *klass = mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)gparam->tbuilder));
gparam->tbuilder->generic_container = mono_image_alloc0 (klass->image, sizeof (MonoGenericContainer));
gparam->tbuilder->generic_container->owner.klass = klass;
}
param->param.owner = gparam->tbuilder->generic_container;
}
pklass = mono_class_from_generic_parameter ((MonoGenericParam *) param, image, gparam->mbuilder != NULL);
gparam->type.type = &pklass->byval_arg;
mono_class_set_ref_info (pklass, gparam);
mono_image_lock (image);
image->reflection_info_unregister_classes = g_slist_prepend (image->reflection_info_unregister_classes, pklass);
mono_image_unlock (image);
}
MonoArray *
mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig)
{
MonoReflectionModuleBuilder *module = sig->module;
MonoDynamicImage *assembly = module != NULL ? module->dynamic_image : NULL;
guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0;
guint32 buflen, i;
MonoArray *result;
SigBuffer buf;
check_array_for_usertypes (sig->arguments);
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x07);
sigbuffer_add_value (&buf, na);
if (assembly != NULL){
for (i = 0; i < na; ++i) {
MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, type, &buf);
}
}
buflen = buf.p - buf.buf;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
memcpy (mono_array_addr (result, char, 0), buf.buf, buflen);
sigbuffer_free (&buf);
return result;
}
MonoArray *
mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig)
{
MonoDynamicImage *assembly = sig->module->dynamic_image;
guint32 na = sig->arguments ? mono_array_length (sig->arguments) : 0;
guint32 buflen, i;
MonoArray *result;
SigBuffer buf;
check_array_for_usertypes (sig->arguments);
sigbuffer_init (&buf, 32);
sigbuffer_add_value (&buf, 0x06);
for (i = 0; i < na; ++i) {
MonoReflectionType *type = mono_array_get (sig->arguments, MonoReflectionType*, i);
encode_reflection_type (assembly, type, &buf);
}
buflen = buf.p - buf.buf;
result = mono_array_new (mono_domain_get (), mono_defaults.byte_class, buflen);
memcpy (mono_array_addr (result, char, 0), buf.buf, buflen);
sigbuffer_free (&buf);
return result;
}
typedef struct {
MonoMethod *handle;
MonoDomain *domain;
} DynamicMethodReleaseData;
static MonoReferenceQueue *dynamic_method_queue;
void
mono_reflection_shutdown (void)
{
MonoReferenceQueue *queue;
mono_loader_lock ();
queue = dynamic_method_queue;
dynamic_method_queue = NULL;
if (queue)
mono_gc_reference_queue_free (queue);
mono_loader_unlock ();
}
static void
free_dynamic_method (void *dynamic_method)
{
DynamicMethodReleaseData *data = dynamic_method;
mono_runtime_free_method (data->domain, data->handle);
g_free (data);
}
void
mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb)
{
MonoReferenceQueue *queue;
MonoMethod *handle;
DynamicMethodReleaseData *release_data;
ReflectionMethodBuilder rmb;
MonoMethodSignature *sig;
MonoClass *klass;
GSList *l;
int i;
if (mono_runtime_is_shutting_down ())
mono_raise_exception (mono_get_exception_invalid_operation (""));
if (!(queue = dynamic_method_queue)) {
mono_loader_lock ();
if (!(queue = dynamic_method_queue))
queue = dynamic_method_queue = mono_gc_reference_queue_new (free_dynamic_method);
mono_loader_unlock ();
}
sig = dynamic_method_to_signature (mb);
reflection_methodbuilder_from_dynamic_method (&rmb, mb);
/*
* Resolve references.
*/
/*
* Every second entry in the refs array is reserved for storing handle_class,
* which is needed by the ldtoken implementation in the JIT.
*/
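/*
* Illustrative layout (an assumption drawn from the comment above, not part
* of the original code): for nrefs == 4 the array built below is
* refs [0] = resolved object, refs [1] = its handle_class,
* refs [2] = resolved object, refs [3] = its handle_class.
*/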
rmb.nrefs = mb->nrefs;
rmb.refs = g_new0 (gpointer, mb->nrefs + 1);
for (i = 0; i < mb->nrefs; i += 2) {
MonoClass *handle_class;
gpointer ref;
MonoObject *obj = mono_array_get (mb->refs, MonoObject*, i);
if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
/*
* The referenced DynamicMethod should already be created by the managed
* code, except in the case of circular references. In that case, we store
* the method in the refs array, and fix it up later when the referenced
* DynamicMethod is created.
*/
if (method->mhandle) {
ref = method->mhandle;
} else {
/* FIXME: GC object stored in unmanaged memory */
ref = method;
/* FIXME: GC object stored in unmanaged memory */
method->referenced_by = g_slist_append (method->referenced_by, mb);
}
handle_class = mono_defaults.methodhandle_class;
} else {
MonoException *ex = NULL;
ref = resolve_object (mb->module->image, obj, &handle_class, NULL);
if (!ref)
ex = mono_get_exception_type_load (NULL, NULL);
else if (mono_security_get_mode () == MONO_SECURITY_MODE_CORE_CLR)
ex = mono_security_core_clr_ensure_dynamic_method_resolved_object (ref, handle_class);
if (ex) {
g_free (rmb.refs);
mono_raise_exception (ex);
return;
}
}
rmb.refs [i] = ref; /* FIXME: GC object stored in unmanaged memory (change also resolve_object() signature) */
rmb.refs [i + 1] = handle_class;
}
klass = mb->owner ? mono_class_from_mono_type (mono_reflection_type_get_handle ((MonoReflectionType*)mb->owner)) : mono_defaults.object_class;
mb->mhandle = handle = reflection_methodbuilder_to_mono_method (klass, &rmb, sig);
release_data = g_new (DynamicMethodReleaseData, 1);
release_data->handle = handle;
release_data->domain = mono_object_get_domain ((MonoObject*)mb);
if (!mono_gc_reference_queue_add (queue, (MonoObject*)mb, release_data))
g_free (release_data);
/* Fix up refs entries pointing at us */
for (l = mb->referenced_by; l; l = l->next) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)l->data;
MonoMethodWrapper *wrapper = (MonoMethodWrapper*)method->mhandle;
gpointer *data;
g_assert (method->mhandle);
data = (gpointer*)wrapper->method_data;
for (i = 0; i < GPOINTER_TO_UINT (data [0]); i += 2) {
if ((data [i + 1] == mb) && (data [i + 1 + 1] == mono_defaults.methodhandle_class))
data [i + 1] = mb->mhandle;
}
}
g_slist_free (mb->referenced_by);
g_free (rmb.refs);
/* ilgen is no longer needed */
mb->ilgen = NULL;
}
#endif /* DISABLE_REFLECTION_EMIT */
/**
* mono_reflection_is_valid_dynamic_token:
*
* Returns TRUE if token is valid.
*/
gboolean
mono_reflection_is_valid_dynamic_token (MonoDynamicImage *image, guint32 token)
{
return mono_g_hash_table_lookup (image->tokens, GUINT_TO_POINTER (token)) != NULL;
}
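/*
* A minimal usage sketch (hypothetical, not part of the original file):
* callers that cannot tolerate the assertion inside
* mono_reflection_lookup_dynamic_token () can probe the token->object
* mapping first and bail out on unknown tokens.
*/
#if 0
static gpointer
lookup_dynamic_token_checked (MonoDynamicImage *image, guint32 token, MonoClass **handle_class)
{
	if (!mono_reflection_is_valid_dynamic_token (image, token))
		return NULL; /* unknown token: let the caller decide what to do */
	return mono_reflection_lookup_dynamic_token (&image->image, token, FALSE, handle_class, NULL);
}
#endif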
MonoMethodSignature *
mono_reflection_lookup_signature (MonoImage *image, MonoMethod *method, guint32 token)
{
MonoMethodSignature *sig;
g_assert (image->dynamic);
sig = g_hash_table_lookup (((MonoDynamicImage*)image)->vararg_aux_hash, GUINT_TO_POINTER (token));
if (sig)
return sig;
return mono_method_signature (method);
}
#ifndef DISABLE_REFLECTION_EMIT
/**
* mono_reflection_lookup_dynamic_token:
*
* Finish the Builder object pointed to by TOKEN and return the corresponding
* runtime structure. If HANDLE_CLASS is not NULL, it is set to the class required by
* mono_ldtoken. If valid_token is TRUE, assert if it is not found in the token->object
* mapping table.
*
* LOCKING: Takes the loader lock.
*/
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
MonoDynamicImage *assembly = (MonoDynamicImage*)image;
MonoObject *obj;
MonoClass *klass;
mono_loader_lock ();
obj = mono_g_hash_table_lookup (assembly->tokens, GUINT_TO_POINTER (token));
mono_loader_unlock ();
if (!obj) {
if (valid_token)
g_error ("Could not find required dynamic token 0x%08x", token);
else
return NULL;
}
if (!handle_class)
handle_class = &klass;
return resolve_object (image, obj, handle_class, context);
}
/*
* ensure_complete_type:
*
* Ensure that KLASS is completed if it is a dynamic type, or references
* dynamic types.
*/
static void
ensure_complete_type (MonoClass *klass)
{
if (klass->image->dynamic && !klass->wastypebuilder && mono_class_get_ref_info (klass)) {
MonoReflectionTypeBuilder *tb = mono_class_get_ref_info (klass);
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
// Asserting here could break a lot of code
//g_assert (klass->wastypebuilder);
}
if (klass->generic_class) {
MonoGenericInst *inst = klass->generic_class->context.class_inst;
int i;
for (i = 0; i < inst->type_argc; ++i) {
ensure_complete_type (mono_class_from_mono_type (inst->type_argv [i]));
}
}
}
static gpointer
resolve_object (MonoImage *image, MonoObject *obj, MonoClass **handle_class, MonoGenericContext *context)
{
gpointer result = NULL;
if (strcmp (obj->vtable->klass->name, "String") == 0) {
result = mono_string_intern ((MonoString*)obj);
*handle_class = mono_defaults.string_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MonoType") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
MonoClass *mc = mono_class_from_mono_type (type);
if (!mono_class_init (mc))
mono_raise_exception (mono_class_get_exception_for_failure (mc));
if (context) {
MonoType *inflated = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (inflated);
mono_metadata_free_type (inflated);
} else {
result = mono_class_from_mono_type (type);
}
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MonoMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoCMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoGenericCMethod") == 0 ||
strcmp (obj->vtable->klass->name, "MonoGenericMethod") == 0) {
result = ((MonoReflectionMethod*)obj)->method;
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "MethodBuilder") == 0) {
MonoReflectionMethodBuilder *mb = (MonoReflectionMethodBuilder*)obj;
result = mb->mhandle;
if (!result) {
/* Type is not yet created */
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)mb->type;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
/*
* Hopefully this has been filled in by calling CreateType() on the
* TypeBuilder.
*/
/*
* TODO: This won't work if the application finishes another
* TypeBuilder instance instead of this one.
*/
result = mb->mhandle;
}
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "ConstructorBuilder") == 0) {
MonoReflectionCtorBuilder *cb = (MonoReflectionCtorBuilder*)obj;
result = cb->mhandle;
if (!result) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)cb->type;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = cb->mhandle;
}
if (context)
result = mono_class_inflate_generic_method (result, context);
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "MonoField") == 0) {
MonoClassField *field = ((MonoReflectionField*)obj)->field;
ensure_complete_type (field->parent);
if (context) {
MonoType *inflated = mono_class_inflate_generic_type (&field->parent->byval_arg, context);
MonoClass *class = mono_class_from_mono_type (inflated);
MonoClassField *inflated_field;
gpointer iter = NULL;
mono_metadata_free_type (inflated);
while ((inflated_field = mono_class_get_fields (class, &iter))) {
if (!strcmp (field->name, inflated_field->name))
break;
}
g_assert (inflated_field && !strcmp (field->name, inflated_field->name));
result = inflated_field;
} else {
result = field;
}
*handle_class = mono_defaults.fieldhandle_class;
g_assert (result);
} else if (strcmp (obj->vtable->klass->name, "FieldBuilder") == 0) {
MonoReflectionFieldBuilder *fb = (MonoReflectionFieldBuilder*)obj;
result = fb->handle;
if (!result) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)fb->typeb;
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = fb->handle;
}
if (fb->handle && fb->handle->parent->generic_container) {
MonoClass *klass = fb->handle->parent;
MonoType *type = mono_class_inflate_generic_type (&klass->byval_arg, context);
MonoClass *inflated = mono_class_from_mono_type (type);
result = mono_class_get_field_from_name (inflated, mono_field_get_name (fb->handle));
g_assert (result);
mono_metadata_free_type (type);
}
*handle_class = mono_defaults.fieldhandle_class;
} else if (strcmp (obj->vtable->klass->name, "TypeBuilder") == 0) {
MonoReflectionTypeBuilder *tb = (MonoReflectionTypeBuilder*)obj;
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)tb);
MonoClass *klass;
klass = type->data.klass;
if (klass->wastypebuilder) {
/* Already created */
result = klass;
}
else {
mono_domain_try_type_resolve (mono_domain_get (), NULL, (MonoObject*)tb);
result = type->data.klass;
g_assert (result);
}
*handle_class = mono_defaults.typehandle_class;
} else if (strcmp (obj->vtable->klass->name, "SignatureHelper") == 0) {
MonoReflectionSigHelper *helper = (MonoReflectionSigHelper*)obj;
MonoMethodSignature *sig;
int nargs, i;
if (helper->arguments)
nargs = mono_array_length (helper->arguments);
else
nargs = 0;
sig = mono_metadata_signature_alloc (image, nargs);
sig->explicit_this = helper->call_conv & 64 ? 1 : 0;
sig->hasthis = helper->call_conv & 32 ? 1 : 0;
if (helper->unmanaged_call_conv) { /* unmanaged */
sig->call_convention = helper->unmanaged_call_conv - 1;
sig->pinvoke = TRUE;
} else if (helper->call_conv & 0x02) {
sig->call_convention = MONO_CALL_VARARG;
} else {
sig->call_convention = MONO_CALL_DEFAULT;
}
sig->param_count = nargs;
/* TODO: Copy type ? */
sig->ret = helper->return_type->type;
for (i = 0; i < nargs; ++i)
sig->params [i] = mono_type_array_get_and_resolve (helper->arguments, i);
result = sig;
*handle_class = NULL;
} else if (strcmp (obj->vtable->klass->name, "DynamicMethod") == 0) {
MonoReflectionDynamicMethod *method = (MonoReflectionDynamicMethod*)obj;
/* Already created by the managed code */
g_assert (method->mhandle);
result = method->mhandle;
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "GenericTypeParameterBuilder") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
type = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "MonoGenericClass") == 0) {
MonoType *type = mono_reflection_type_get_handle ((MonoReflectionType*)obj);
type = mono_class_inflate_generic_type (type, context);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
g_assert (result);
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "FieldOnTypeBuilderInst") == 0) {
MonoReflectionFieldOnTypeBuilderInst *f = (MonoReflectionFieldOnTypeBuilderInst*)obj;
MonoClass *inflated;
MonoType *type;
MonoClassField *field;
if (is_sre_field_builder (mono_object_class (f->fb)))
field = ((MonoReflectionFieldBuilder*)f->fb)->handle;
else if (is_sr_mono_field (mono_object_class (f->fb)))
field = ((MonoReflectionField*)f->fb)->field;
else
g_error ("resolve_object:: can't handle a FTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (f->fb)));
type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)f->inst), context);
inflated = mono_class_from_mono_type (type);
result = field = mono_class_get_field_from_name (inflated, mono_field_get_name (field));
ensure_complete_type (field->parent);
g_assert (result);
mono_metadata_free_type (type);
*handle_class = mono_defaults.fieldhandle_class;
} else if (strcmp (obj->vtable->klass->name, "ConstructorOnTypeBuilderInst") == 0) {
MonoReflectionCtorOnTypeBuilderInst *c = (MonoReflectionCtorOnTypeBuilderInst*)obj;
MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)c->inst), context);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
MonoMethod *method;
if (is_sre_ctor_builder (mono_object_class (c->cb)))
method = ((MonoReflectionCtorBuilder *)c->cb)->mhandle;
else if (is_sr_mono_cmethod (mono_object_class (c->cb)))
method = ((MonoReflectionMethod *)c->cb)->method;
else
g_error ("resolve_object:: can't handle a CTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (c->cb)));
result = inflate_mono_method (inflated_klass, method, (MonoObject*)c->cb);
*handle_class = mono_defaults.methodhandle_class;
mono_metadata_free_type (type);
} else if (strcmp (obj->vtable->klass->name, "MethodOnTypeBuilderInst") == 0) {
MonoReflectionMethodOnTypeBuilderInst *m = (MonoReflectionMethodOnTypeBuilderInst*)obj;
if (m->method_args) {
result = mono_reflection_method_on_tb_inst_get_handle (m);
if (context)
result = mono_class_inflate_generic_method (result, context);
} else {
MonoType *type = mono_class_inflate_generic_type (mono_reflection_type_get_handle ((MonoReflectionType*)m->inst), context);
MonoClass *inflated_klass = mono_class_from_mono_type (type);
MonoMethod *method;
if (is_sre_method_builder (mono_object_class (m->mb)))
method = ((MonoReflectionMethodBuilder *)m->mb)->mhandle;
else if (is_sr_mono_method (mono_object_class (m->mb)))
method = ((MonoReflectionMethod *)m->mb)->method;
else
g_error ("resolve_object:: can't handle a MTBI with base_method of type %s", mono_type_get_full_name (mono_object_class (m->mb)));
result = inflate_mono_method (inflated_klass, method, (MonoObject*)m->mb);
mono_metadata_free_type (type);
}
*handle_class = mono_defaults.methodhandle_class;
} else if (strcmp (obj->vtable->klass->name, "MonoArrayMethod") == 0) {
MonoReflectionArrayMethod *m = (MonoReflectionArrayMethod*)obj;
MonoType *mtype;
MonoClass *klass;
MonoMethod *method;
gpointer iter;
char *name;
mtype = mono_reflection_type_get_handle (m->parent);
klass = mono_class_from_mono_type (mtype);
/* Find the method */
name = mono_string_to_utf8 (m->name);
iter = NULL;
while ((method = mono_class_get_methods (klass, &iter))) {
if (!strcmp (method->name, name))
break;
}
g_free (name);
// FIXME:
g_assert (method);
// FIXME: Check parameters/return value etc. match
result = method;
*handle_class = mono_defaults.methodhandle_class;
} else if (is_sre_array (mono_object_get_class(obj)) ||
is_sre_byref (mono_object_get_class(obj)) ||
is_sre_pointer (mono_object_get_class(obj))) {
MonoReflectionType *ref_type = (MonoReflectionType *)obj;
MonoType *type = mono_reflection_type_get_handle (ref_type);
result = mono_class_from_mono_type (type);
*handle_class = mono_defaults.typehandle_class;
} else {
g_print ("%s\n", obj->vtable->klass->name);
g_assert_not_reached ();
}
return result;
}
#else /* DISABLE_REFLECTION_EMIT */
MonoArray*
mono_reflection_get_custom_attrs_blob (MonoReflectionAssembly *assembly, MonoObject *ctor, MonoArray *ctorArgs, MonoArray *properties, MonoArray *propValues, MonoArray *fields, MonoArray* fieldValues)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_setup_internal_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_setup_generic_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_create_generic_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_reflection_create_internal_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
}
void
mono_image_basic_init (MonoReflectionAssemblyBuilder *assemblyb)
{
g_error ("This mono runtime was configured with --enable-minimal=reflection_emit, so System.Reflection.Emit is not supported.");
}
void
mono_image_module_basic_init (MonoReflectionModuleBuilder *moduleb)
{
g_assert_not_reached ();
}
void
mono_image_set_wrappers_type (MonoReflectionModuleBuilder *moduleb, MonoReflectionType *type)
{
g_assert_not_reached ();
}
MonoReflectionModule *
mono_image_load_module_dynamic (MonoReflectionAssemblyBuilder *ab, MonoString *fileName)
{
g_assert_not_reached ();
return NULL;
}
guint32
mono_image_insert_string (MonoReflectionModuleBuilder *module, MonoString *str)
{
g_assert_not_reached ();
return 0;
}
guint32
mono_image_create_method_token (MonoDynamicImage *assembly, MonoObject *obj, MonoArray *opt_param_types)
{
g_assert_not_reached ();
return 0;
}
guint32
mono_image_create_token (MonoDynamicImage *assembly, MonoObject *obj,
gboolean create_open_instance, gboolean register_token)
{
g_assert_not_reached ();
return 0;
}
void
mono_image_register_token (MonoDynamicImage *assembly, guint32 token, MonoObject *obj)
{
}
void
mono_reflection_generic_class_initialize (MonoReflectionGenericClass *type, MonoArray *methods,
MonoArray *ctors, MonoArray *fields, MonoArray *properties,
MonoArray *events)
{
g_assert_not_reached ();
}
void
mono_reflection_get_dynamic_overrides (MonoClass *klass, MonoMethod ***overrides, int *num_overrides)
{
*overrides = NULL;
*num_overrides = 0;
}
MonoReflectionEvent *
mono_reflection_event_builder_get_event_info (MonoReflectionTypeBuilder *tb, MonoReflectionEventBuilder *eb)
{
g_assert_not_reached ();
return NULL;
}
MonoReflectionType*
mono_reflection_create_runtime_class (MonoReflectionTypeBuilder *tb)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_initialize_generic_parameter (MonoReflectionGenericParam *gparam)
{
g_assert_not_reached ();
}
MonoArray *
mono_reflection_sighelper_get_signature_local (MonoReflectionSigHelper *sig)
{
g_assert_not_reached ();
return NULL;
}
MonoArray *
mono_reflection_sighelper_get_signature_field (MonoReflectionSigHelper *sig)
{
g_assert_not_reached ();
return NULL;
}
void
mono_reflection_create_dynamic_method (MonoReflectionDynamicMethod *mb)
{
}
gpointer
mono_reflection_lookup_dynamic_token (MonoImage *image, guint32 token, gboolean valid_token, MonoClass **handle_class, MonoGenericContext *context)
{
return NULL;
}
MonoType*
mono_reflection_type_get_handle (MonoReflectionType* ref)
{
if (!ref)
return NULL;
return ref->type;
}
void
mono_reflection_free_dynamic_generic_class (MonoGenericClass *gclass)
{
g_assert_not_reached ();
}
#endif /* DISABLE_REFLECTION_EMIT */
/* SECURITY_ACTION_* are defined in mono/metadata/tabledefs.h */
static const guint32 declsec_flags_map[] = {
0x00000000, /* empty */
MONO_DECLSEC_FLAG_REQUEST, /* SECURITY_ACTION_REQUEST (x01) */
MONO_DECLSEC_FLAG_DEMAND, /* SECURITY_ACTION_DEMAND (x02) */
MONO_DECLSEC_FLAG_ASSERT, /* SECURITY_ACTION_ASSERT (x03) */
MONO_DECLSEC_FLAG_DENY, /* SECURITY_ACTION_DENY (x04) */
MONO_DECLSEC_FLAG_PERMITONLY, /* SECURITY_ACTION_PERMITONLY (x05) */
MONO_DECLSEC_FLAG_LINKDEMAND, /* SECURITY_ACTION_LINKDEMAND (x06) */
MONO_DECLSEC_FLAG_INHERITANCEDEMAND, /* SECURITY_ACTION_INHERITANCEDEMAND (x07) */
MONO_DECLSEC_FLAG_REQUEST_MINIMUM, /* SECURITY_ACTION_REQUEST_MINIMUM (x08) */
MONO_DECLSEC_FLAG_REQUEST_OPTIONAL, /* SECURITY_ACTION_REQUEST_OPTIONAL (x09) */
MONO_DECLSEC_FLAG_REQUEST_REFUSE, /* SECURITY_ACTION_REQUEST_REFUSE (x0A) */
MONO_DECLSEC_FLAG_PREJIT_GRANT, /* SECURITY_ACTION_PREJIT_GRANT (x0B) */
MONO_DECLSEC_FLAG_PREJIT_DENY, /* SECURITY_ACTION_PREJIT_DENY (x0C) */
MONO_DECLSEC_FLAG_NONCAS_DEMAND, /* SECURITY_ACTION_NONCAS_DEMAND (x0D) */
MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND, /* SECURITY_ACTION_NONCAS_LINKDEMAND (x0E) */
MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND, /* SECURITY_ACTION_NONCAS_INHERITANCEDEMAND (x0F) */
MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE, /* SECURITY_ACTION_LINKDEMAND_CHOICE (x10) */
MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE, /* SECURITY_ACTION_INHERITANCEDEMAND_CHOICE (x11) */
MONO_DECLSEC_FLAG_DEMAND_CHOICE, /* SECURITY_ACTION_DEMAND_CHOICE (x12) */
};
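/*
* Illustrative sketch (an assumption, not in the original source): the table
* above is indexed directly by the SECURITY_ACTION_* value, so turning one
* action code into its flag is a bounds-checked array lookup.
*/
#if 0
static guint32
declsec_action_to_flag (guint32 action)
{
	if ((action < MONO_DECLSEC_ACTION_MIN) || (action > MONO_DECLSEC_ACTION_MAX))
		return 0;
	return declsec_flags_map [action];
}
#endif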
/*
* Returns flags that include all available security actions associated with the handle.
* @token: metadata token (either for a class or a method)
* @image: image where the metadata resides.
*/
static guint32
mono_declsec_get_flags (MonoImage *image, guint32 token)
{
int index = mono_metadata_declsec_from_index (image, token);
MonoTableInfo *t = &image->tables [MONO_TABLE_DECLSECURITY];
guint32 result = 0;
guint32 action;
int i;
/* HasSecurity can be present for other, not specially encoded, attributes,
e.g. SuppressUnmanagedCodeSecurityAttribute */
if (index < 0)
return 0;
for (i = index; i < t->rows; i++) {
guint32 cols [MONO_DECL_SECURITY_SIZE];
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
if (cols [MONO_DECL_SECURITY_PARENT] != token)
break;
action = cols [MONO_DECL_SECURITY_ACTION];
if ((action >= MONO_DECLSEC_ACTION_MIN) && (action <= MONO_DECLSEC_ACTION_MAX)) {
result |= declsec_flags_map [action];
} else {
g_assert_not_reached ();
}
}
return result;
}
/*
* Get the security actions (in the form of flags) associated with the specified method.
*
* @method: The method for which we want the declarative security flags.
* Returns the declarative security flags for the method (only).
*
* Note: To keep MonoMethod size down we do not cache the declarative security flags
* (except for the stack modifiers which are kept in the MonoJitInfo structure)
*/
guint32
mono_declsec_flags_from_method (MonoMethod *method)
{
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
/* FIXME: No cache (for the moment) */
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return mono_declsec_get_flags (method->klass->image, idx);
}
return 0;
}
/*
* Get the security actions (in the form of flags) associated with the specified class.
*
* @klass: The class for which we want the declarative security flags.
* Returns the declarative security flags for the class.
*
* Note: We cache the flags inside the MonoClass structure as this will get
* called very often (at least for each method).
*/
guint32
mono_declsec_flags_from_class (MonoClass *klass)
{
if (klass->flags & TYPE_ATTRIBUTE_HAS_SECURITY) {
if (!klass->ext || !klass->ext->declsec_flags) {
guint32 idx;
idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
mono_loader_lock ();
mono_class_alloc_ext (klass);
mono_loader_unlock ();
/* we cache the flags on classes */
klass->ext->declsec_flags = mono_declsec_get_flags (klass->image, idx);
}
return klass->ext->declsec_flags;
}
return 0;
}
/*
* Get the security actions (in the form of flags) associated with the specified assembly.
*
* @assembly: The assembly for which we want the declarative security flags.
* Returns the declarative security flags for the assembly.
*/
guint32
mono_declsec_flags_from_assembly (MonoAssembly *assembly)
{
guint32 idx = 1; /* there is only one assembly */
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
return mono_declsec_get_flags (assembly->image, idx);
}
/*
* Fill actions for the specific index (which may either be an encoded class token or
* an encoded method token) from the metadata image.
* Returns TRUE if some actions requiring code generation are present, FALSE otherwise.
*/
static MonoBoolean
fill_actions_from_index (MonoImage *image, guint32 token, MonoDeclSecurityActions* actions,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
MonoBoolean result = FALSE;
MonoTableInfo *t;
guint32 cols [MONO_DECL_SECURITY_SIZE];
int index = mono_metadata_declsec_from_index (image, token);
int i;
t = &image->tables [MONO_TABLE_DECLSECURITY];
for (i = index; i < t->rows; i++) {
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
if (cols [MONO_DECL_SECURITY_PARENT] != token)
return result;
/* if present only replace (class) permissions with method permissions */
/* if empty accept either class or method permissions */
if (cols [MONO_DECL_SECURITY_ACTION] == id_std) {
if (!actions->demand.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->demand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->demand.blob = (char*) (blob + 2);
actions->demand.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
} else if (cols [MONO_DECL_SECURITY_ACTION] == id_noncas) {
if (!actions->noncasdemand.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->noncasdemand.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->noncasdemand.blob = (char*) (blob + 2);
actions->noncasdemand.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
} else if (cols [MONO_DECL_SECURITY_ACTION] == id_choice) {
if (!actions->demandchoice.blob) {
const char *blob = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
actions->demandchoice.index = cols [MONO_DECL_SECURITY_PERMISSIONSET];
actions->demandchoice.blob = (char*) (blob + 2);
actions->demandchoice.size = mono_metadata_decode_blob_size (blob, &blob);
result = TRUE;
}
}
}
return result;
}
static MonoBoolean
mono_declsec_get_class_demands_params (MonoClass *klass, MonoDeclSecurityActions* demands,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
guint32 idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
return fill_actions_from_index (klass->image, idx, demands, id_std, id_noncas, id_choice);
}
static MonoBoolean
mono_declsec_get_method_demands_params (MonoMethod *method, MonoDeclSecurityActions* demands,
guint32 id_std, guint32 id_noncas, guint32 id_choice)
{
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return fill_actions_from_index (method->klass->image, idx, demands, id_std, id_noncas, id_choice);
}
/*
* Collect all actions (those that require code generation in mini) assigned
* to the specified method.
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_demands (MonoMethod *method, MonoDeclSecurityActions* demands)
{
guint32 mask = MONO_DECLSEC_FLAG_DEMAND | MONO_DECLSEC_FLAG_NONCAS_DEMAND |
MONO_DECLSEC_FLAG_DEMAND_CHOICE;
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original, as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
/* First we look for method-level attributes */
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
result = mono_declsec_get_method_demands_params (method, demands,
SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
}
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (method->klass);
if (flags & mask) {
if (!result) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
}
result |= mono_declsec_get_class_demands_params (method->klass, demands,
SECURITY_ACTION_DEMAND, SECURITY_ACTION_NONCASDEMAND, SECURITY_ACTION_DEMANDCHOICE);
}
/* The boolean return value is used as a shortcut in case nothing needs to
be generated (e.g. LinkDemand[Choice] and InheritanceDemand[Choice]) */
return result;
}
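/*
* Hypothetical caller sketch (not part of the original file): in the JIT,
* security checks only need to be emitted when mono_declsec_get_demands ()
* reports that at least one demand blob was collected.
*/
#if 0
static void
emit_security_checks (MonoMethod *method)
{
	MonoDeclSecurityActions demands;
	if (mono_declsec_get_demands (method, &demands)) {
		/* demands.demand, demands.noncasdemand and demands.demandchoice
		 * now hold valid blobs; emit the corresponding checks here. */
	}
}
#endif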
/*
* Collect all Link actions: LinkDemand, NonCasLinkDemand and LinkDemandChoice (2.0).
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_linkdemands (MonoMethod *method, MonoDeclSecurityActions* klass, MonoDeclSecurityActions *cmethod)
{
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original, as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
/* results are independent - zeroize both */
memset (cmethod, 0, sizeof (MonoDeclSecurityActions));
memset (klass, 0, sizeof (MonoDeclSecurityActions));
/* First we look for method-level attributes */
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
result = mono_declsec_get_method_demands_params (method, cmethod,
SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
}
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (method->klass);
if (flags & (MONO_DECLSEC_FLAG_LINKDEMAND | MONO_DECLSEC_FLAG_NONCAS_LINKDEMAND | MONO_DECLSEC_FLAG_LINKDEMAND_CHOICE)) {
mono_class_init (method->klass);
result |= mono_declsec_get_class_demands_params (method->klass, klass,
SECURITY_ACTION_LINKDEMAND, SECURITY_ACTION_NONCASLINKDEMAND, SECURITY_ACTION_LINKDEMANDCHOICE);
}
return result;
}
/*
* Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
*
* @klass: The inherited class - this is the class that provides the security check (attributes)
* @demands: Where the collected inheritance demands are stored.
* Returns TRUE if inheritance demands (any kind) are present, FALSE otherwise.
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_inheritdemands_class (MonoClass *klass, MonoDeclSecurityActions* demands)
{
MonoBoolean result = FALSE;
guint32 flags;
/* quick exit if no declarative security is present in the metadata */
if (!klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* Here we use (or create) the class declarative cache to look for demands */
flags = mono_declsec_flags_from_class (klass);
if (flags & (MONO_DECLSEC_FLAG_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_NONCAS_INHERITANCEDEMAND | MONO_DECLSEC_FLAG_INHERITANCEDEMAND_CHOICE)) {
mono_class_init (klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
result |= mono_declsec_get_class_demands_params (klass, demands,
SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE);
}
return result;
}
/*
* Collect all Inherit actions: InheritanceDemand, NonCasInheritanceDemand and InheritanceDemandChoice (2.0).
*
* Note: Don't use the content of actions if the function returns FALSE.
*/
MonoBoolean
mono_declsec_get_inheritdemands_method (MonoMethod *method, MonoDeclSecurityActions* demands)
{
/* quick exit if no declarative security is present in the metadata */
if (!method->klass->image->tables [MONO_TABLE_DECLSECURITY].rows)
return FALSE;
/* we want the original, as the wrapper is "free" of the security information */
if (method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE || method->wrapper_type == MONO_WRAPPER_MANAGED_TO_MANAGED) {
method = mono_marshal_method_from_wrapper (method);
if (!method)
return FALSE;
}
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
mono_class_init (method->klass);
memset (demands, 0, sizeof (MonoDeclSecurityActions));
return mono_declsec_get_method_demands_params (method, demands,
SECURITY_ACTION_INHERITDEMAND, SECURITY_ACTION_NONCASINHERITANCE, SECURITY_ACTION_INHERITDEMANDCHOICE);
}
return FALSE;
}
static MonoBoolean
get_declsec_action (MonoImage *image, guint32 token, guint32 action, MonoDeclSecurityEntry *entry)
{
guint32 cols [MONO_DECL_SECURITY_SIZE];
MonoTableInfo *t;
int i;
int index = mono_metadata_declsec_from_index (image, token);
if (index == -1)
return FALSE;
t = &image->tables [MONO_TABLE_DECLSECURITY];
for (i = index; i < t->rows; i++) {
mono_metadata_decode_row (t, i, cols, MONO_DECL_SECURITY_SIZE);
/* shortcut - indexes are ordered */
if (token != cols [MONO_DECL_SECURITY_PARENT])
return FALSE;
if (cols [MONO_DECL_SECURITY_ACTION] == action) {
const char *metadata = mono_metadata_blob_heap (image, cols [MONO_DECL_SECURITY_PERMISSIONSET]);
entry->blob = (char*) (metadata + 2);
entry->size = mono_metadata_decode_blob_size (metadata, &metadata);
return TRUE;
}
}
return FALSE;
}
MonoBoolean
mono_declsec_get_method_action (MonoMethod *method, guint32 action, MonoDeclSecurityEntry *entry)
{
if (method->flags & METHOD_ATTRIBUTE_HAS_SECURITY) {
guint32 idx = mono_method_get_index (method);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_METHODDEF;
return get_declsec_action (method->klass->image, idx, action, entry);
}
return FALSE;
}
MonoBoolean
mono_declsec_get_class_action (MonoClass *klass, guint32 action, MonoDeclSecurityEntry *entry)
{
/* use cache */
guint32 flags = mono_declsec_flags_from_class (klass);
if (declsec_flags_map [action] & flags) {
guint32 idx = mono_metadata_token_index (klass->type_token);
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_TYPEDEF;
return get_declsec_action (klass->image, idx, action, entry);
}
return FALSE;
}
MonoBoolean
mono_declsec_get_assembly_action (MonoAssembly *assembly, guint32 action, MonoDeclSecurityEntry *entry)
{
guint32 idx = 1; /* there is only one assembly */
idx <<= MONO_HAS_DECL_SECURITY_BITS;
idx |= MONO_HAS_DECL_SECURITY_ASSEMBLY;
return get_declsec_action (assembly->image, idx, action, entry);
}
gboolean
mono_reflection_call_is_assignable_to (MonoClass *klass, MonoClass *oklass)
{
MonoObject *res, *exc;
void *params [1];
static MonoClass *System_Reflection_Emit_TypeBuilder = NULL;
static MonoMethod *method = NULL;
if (!System_Reflection_Emit_TypeBuilder) {
System_Reflection_Emit_TypeBuilder = mono_class_from_name (mono_defaults.corlib, "System.Reflection.Emit", "TypeBuilder");
g_assert (System_Reflection_Emit_TypeBuilder);
}
if (method == NULL) {
method = mono_class_get_method_from_name (System_Reflection_Emit_TypeBuilder, "IsAssignableTo", 1);
g_assert (method);
}
/*
* The result of mono_type_get_object () might be a System.MonoType but we
* need a TypeBuilder so use mono_class_get_ref_info (klass).
*/
g_assert (mono_class_get_ref_info (klass));
g_assert (!strcmp (((MonoObject*)(mono_class_get_ref_info (klass)))->vtable->klass->name, "TypeBuilder"));
params [0] = mono_type_get_object (mono_domain_get (), &oklass->byval_arg);
res = mono_runtime_invoke (method, (MonoObject*)(mono_class_get_ref_info (klass)), params, &exc);
if (exc)
return FALSE;
else
return *(MonoBoolean*)mono_object_unbox (res);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3432_3 |
crossvul-cpp_data_good_5587_0 | /*
* Resizable virtual memory filesystem for Linux.
*
* Copyright (C) 2000 Linus Torvalds.
* 2000 Transmeta Corp.
* 2000-2001 Christoph Rohland
* 2000-2001 SAP AG
* 2002 Red Hat Inc.
* Copyright (C) 2002-2011 Hugh Dickins.
* Copyright (C) 2011 Google Inc.
* Copyright (C) 2002-2005 VERITAS Software Corporation.
* Copyright (C) 2004 Andi Kleen, SuSE Labs
*
* Extended attribute support for tmpfs:
* Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
* Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
*
* tiny-shmem:
* Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
*
* This file is released under the GPL.
*/
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
static struct vfsmount *shm_mnt;
#ifdef CONFIG_SHMEM
/*
* This virtual memory filesystem is heavily based on the ramfs. It
* extends ramfs by the ability to use swap and honor resource limits
* which makes it a completely usable filesystem.
*/
#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20
/* Symlink up to this size is kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128
/*
* shmem_fallocate and shmem_writepage communicate via inode->i_private
* (with i_mutex making sure that it has only one user at a time):
* we would prefer not to enlarge the shmem inode just for that.
*/
struct shmem_falloc {
pgoff_t start; /* start of range currently being fallocated */
pgoff_t next; /* the next page offset to be fallocated */
pgoff_t nr_falloced; /* how many new pages have been fallocated */
pgoff_t nr_unswapped; /* how often writepage refused to swap out */
};
/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
SGP_READ, /* don't exceed i_size, don't allocate page */
SGP_CACHE, /* don't exceed i_size, may allocate page */
SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
};
#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
return totalram_pages / 2;
}
static unsigned long shmem_default_max_inodes(void)
{
return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif
static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
static inline int shmem_getpage(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, int *fault_type)
{
return shmem_getpage_gfp(inode, index, pagep, sgp,
mapping_gfp_mask(inode->i_mapping), fault_type);
}
static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
return sb->s_fs_info;
}
/*
* shmem_file_setup pre-accounts the whole fixed size of a VM object,
* for shared memory and for shared anonymous (/dev/zero) mappings
* (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
* consistent with the pre-accounting of private mappings ...
*/
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
return (flags & VM_NORESERVE) ?
0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}
static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
if (!(flags & VM_NORESERVE))
vm_unacct_memory(VM_ACCT(size));
}
/*
* ... whereas tmpfs objects are accounted incrementally as
* pages are allocated, in order to allow huge sparse files.
* shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
* so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
*/
static inline int shmem_acct_block(unsigned long flags)
{
return (flags & VM_NORESERVE) ?
security_vm_enough_memory_mm(current->mm, VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}
static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
if (flags & VM_NORESERVE)
vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
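/*
* Illustrative note (not in the original source): with VM_NORESERVE clear the
* whole object is charged up front by shmem_acct_size() and shmem_acct_block()
* is a no-op; with VM_NORESERVE set the charge moves to shmem_acct_block(),
* one page at a time, so sparse files only pay for pages actually allocated.
*/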
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
.ra_pages = 0, /* No readahead */
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};
static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);
static int shmem_reserve_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock);
if (!sbinfo->free_inodes) {
spin_unlock(&sbinfo->stat_lock);
return -ENOSPC;
}
sbinfo->free_inodes--;
spin_unlock(&sbinfo->stat_lock);
}
return 0;
}
static void shmem_free_inode(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (sbinfo->max_inodes) {
spin_lock(&sbinfo->stat_lock);
sbinfo->free_inodes++;
spin_unlock(&sbinfo->stat_lock);
}
}
/**
* shmem_recalc_inode - recalculate the block usage of an inode
* @inode: inode to recalc
*
* We have to calculate the free blocks since the mm can drop
* undirtied hole pages behind our back.
*
* But normally info->alloced == inode->i_mapping->nrpages + info->swapped
* So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
*
* It has to be called with the spinlock held.
*/
static void shmem_recalc_inode(struct inode *inode)
{
struct shmem_inode_info *info = SHMEM_I(inode);
long freed;
freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
if (freed > 0) {
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -freed);
info->alloced -= freed;
inode->i_blocks -= freed * BLOCKS_PER_PAGE;
shmem_unacct_blocks(info->flags, freed);
}
}
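/*
* Worked example (illustrative, not from the original source): if
* info->alloced == 10, info->swapped == 2 and the mapping holds 5 pages,
* then 10 - 2 - 5 == 3 hole pages were dropped behind our back, so 3
* blocks are returned to the superblock's used_blocks counter.
*/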
/*
* Replace item expected in radix tree by a new item, while holding tree lock.
*/
static int shmem_radix_tree_replace(struct address_space *mapping,
pgoff_t index, void *expected, void *replacement)
{
void **pslot;
void *item = NULL;
VM_BUG_ON(!expected);
pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
if (pslot)
item = radix_tree_deref_slot_protected(pslot,
&mapping->tree_lock);
if (item != expected)
return -ENOENT;
if (replacement)
radix_tree_replace_slot(pslot, replacement);
else
radix_tree_delete(&mapping->page_tree, index);
return 0;
}
/*
* Sometimes, before we decide whether to proceed or to fail, we must check
* that an entry was not already brought back from swap by a racing thread.
*
* Checking page is not enough: by the time a SwapCache page is locked, it
* might be reused, and again be SwapCache, using the same swap as before.
*/
static bool shmem_confirm_swap(struct address_space *mapping,
pgoff_t index, swp_entry_t swap)
{
void *item;
rcu_read_lock();
item = radix_tree_lookup(&mapping->page_tree, index);
rcu_read_unlock();
return item == swp_to_radix_entry(swap);
}
/*
* Like add_to_page_cache_locked, but error if expected item has gone.
*/
static int shmem_add_to_page_cache(struct page *page,
struct address_space *mapping,
pgoff_t index, gfp_t gfp, void *expected)
{
int error;
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(!PageSwapBacked(page));
page_cache_get(page);
page->mapping = mapping;
page->index = index;
spin_lock_irq(&mapping->tree_lock);
if (!expected)
error = radix_tree_insert(&mapping->page_tree, index, page);
else
error = shmem_radix_tree_replace(mapping, index, expected,
page);
if (!error) {
mapping->nrpages++;
__inc_zone_page_state(page, NR_FILE_PAGES);
__inc_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
} else {
page->mapping = NULL;
spin_unlock_irq(&mapping->tree_lock);
page_cache_release(page);
}
return error;
}
/*
* Like delete_from_page_cache, but substitutes swap for page.
*/
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
struct address_space *mapping = page->mapping;
int error;
spin_lock_irq(&mapping->tree_lock);
error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
page->mapping = NULL;
mapping->nrpages--;
__dec_zone_page_state(page, NR_FILE_PAGES);
__dec_zone_page_state(page, NR_SHMEM);
spin_unlock_irq(&mapping->tree_lock);
page_cache_release(page);
BUG_ON(error);
}
/*
* Like find_get_pages, but collecting swap entries as well as pages.
*/
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
pgoff_t start, unsigned int nr_pages,
struct page **pages, pgoff_t *indices)
{
void **slot;
unsigned int ret = 0;
struct radix_tree_iter iter;
if (!nr_pages)
return 0;
rcu_read_lock();
restart:
radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
struct page *page;
repeat:
page = radix_tree_deref_slot(slot);
if (unlikely(!page))
continue;
if (radix_tree_exception(page)) {
if (radix_tree_deref_retry(page))
goto restart;
/*
* Otherwise, we must be storing a swap entry
* here as an exceptional entry: so return it
* without attempting to raise page count.
*/
goto export;
}
if (!page_cache_get_speculative(page))
goto repeat;
/* Has the page moved? */
if (unlikely(page != *slot)) {
page_cache_release(page);
goto repeat;
}
export:
indices[ret] = iter.index;
pages[ret] = page;
if (++ret == nr_pages)
break;
}
rcu_read_unlock();
return ret;
}
/*
* Remove swap entry from radix tree, free the swap and its page cache.
*/
static int shmem_free_swap(struct address_space *mapping,
pgoff_t index, void *radswap)
{
int error;
spin_lock_irq(&mapping->tree_lock);
error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
spin_unlock_irq(&mapping->tree_lock);
if (!error)
free_swap_and_cache(radix_to_swp_entry(radswap));
return error;
}
/*
* Pagevec may contain swap entries, so shuffle up pages before releasing.
*/
static void shmem_deswap_pagevec(struct pagevec *pvec)
{
int i, j;
for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
struct page *page = pvec->pages[i];
if (!radix_tree_exceptional_entry(page))
pvec->pages[j++] = page;
}
pvec->nr = j;
}
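/*
* Illustrative example (not from the original source): given pvec->pages ==
* { page, swap entry, page, swap entry } and pvec->nr == 4, the loop above
* compacts it to { page, page } with pvec->nr == 2, so pagevec_release()
* never sees exceptional entries.
*/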
/*
* SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
*/
void shmem_unlock_mapping(struct address_space *mapping)
{
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
pgoff_t index = 0;
pagevec_init(&pvec, 0);
/*
* Minor point, but we might as well stop if someone else SHM_LOCKs it.
*/
while (!mapping_unevictable(mapping)) {
/*
* Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
* has finished, if it hits a row of PAGEVEC_SIZE swap entries.
*/
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
PAGEVEC_SIZE, pvec.pages, indices);
if (!pvec.nr)
break;
index = indices[pvec.nr - 1] + 1;
shmem_deswap_pagevec(&pvec);
check_move_unevictable_pages(pvec.pages, pvec.nr);
pagevec_release(&pvec);
cond_resched();
}
}
/*
* Remove range of pages and swap entries from radix tree, and free them.
* If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
*/
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
bool unfalloc)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info = SHMEM_I(inode);
pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
long nr_swaps_freed = 0;
pgoff_t index;
int i;
if (lend == -1)
end = -1; /* unsigned, so actually very big */
pagevec_init(&pvec, 0);
index = start;
while (index < end) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr)
break;
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
index = indices[i];
if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
if (!trylock_page(page))
continue;
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
cond_resched();
index++;
}
if (partial_start) {
struct page *page = NULL;
shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
if (page) {
unsigned int top = PAGE_CACHE_SIZE;
if (start > end) {
top = partial_end;
partial_end = 0;
}
zero_user_segment(page, partial_start, top);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
if (partial_end) {
struct page *page = NULL;
shmem_getpage(inode, end, &page, SGP_READ, NULL);
if (page) {
zero_user_segment(page, 0, partial_end);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
}
if (start >= end)
return;
index = start;
for ( ; ; ) {
cond_resched();
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
pvec.pages, indices);
if (!pvec.nr) {
if (index == start || unfalloc)
break;
index = start;
continue;
}
if ((index == start || unfalloc) && indices[0] >= end) {
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
break;
}
mem_cgroup_uncharge_start();
for (i = 0; i < pagevec_count(&pvec); i++) {
struct page *page = pvec.pages[i];
index = indices[i];
if (index >= end)
break;
if (radix_tree_exceptional_entry(page)) {
if (unfalloc)
continue;
nr_swaps_freed += !shmem_free_swap(mapping,
index, page);
continue;
}
lock_page(page);
if (!unfalloc || !PageUptodate(page)) {
if (page->mapping == mapping) {
VM_BUG_ON(PageWriteback(page));
truncate_inode_page(mapping, page);
}
}
unlock_page(page);
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
mem_cgroup_uncharge_end();
index++;
}
spin_lock(&info->lock);
info->swapped -= nr_swaps_freed;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
shmem_undo_range(inode, lstart, lend, false);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
int error;
error = inode_change_ok(inode, attr);
if (error)
return error;
if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
loff_t oldsize = inode->i_size;
loff_t newsize = attr->ia_size;
if (newsize != oldsize) {
i_size_write(inode, newsize);
inode->i_ctime = inode->i_mtime = CURRENT_TIME;
}
if (newsize < oldsize) {
loff_t holebegin = round_up(newsize, PAGE_SIZE);
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
shmem_truncate_range(inode, newsize, (loff_t)-1);
/* unmap again to remove racily COWed private pages */
unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
}
}
setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
if (attr->ia_valid & ATTR_MODE)
error = generic_acl_chmod(inode);
#endif
return error;
}
static void shmem_evict_inode(struct inode *inode)
{
struct shmem_inode_info *info = SHMEM_I(inode);
if (inode->i_mapping->a_ops == &shmem_aops) {
shmem_unacct_size(info->flags, inode->i_size);
inode->i_size = 0;
shmem_truncate_range(inode, 0, (loff_t)-1);
if (!list_empty(&info->swaplist)) {
mutex_lock(&shmem_swaplist_mutex);
list_del_init(&info->swaplist);
mutex_unlock(&shmem_swaplist_mutex);
}
} else
kfree(info->symlink);
simple_xattrs_free(&info->xattrs);
WARN_ON(inode->i_blocks);
shmem_free_inode(inode->i_sb);
clear_inode(inode);
}
/*
* If swap found in inode, free it and move page from swapcache to filecache.
*/
static int shmem_unuse_inode(struct shmem_inode_info *info,
swp_entry_t swap, struct page **pagep)
{
struct address_space *mapping = info->vfs_inode.i_mapping;
void *radswap;
pgoff_t index;
gfp_t gfp;
int error = 0;
radswap = swp_to_radix_entry(swap);
index = radix_tree_locate_item(&mapping->page_tree, radswap);
if (index == -1)
return 0;
/*
* Move _head_ to start search for next from here.
* But be careful: shmem_evict_inode checks list_empty without taking
* mutex, and there's an instant in list_move_tail when info->swaplist
* would appear empty, if it were the only one on shmem_swaplist.
*/
if (shmem_swaplist.next != &info->swaplist)
list_move_tail(&shmem_swaplist, &info->swaplist);
gfp = mapping_gfp_mask(mapping);
if (shmem_should_replace_page(*pagep, gfp)) {
mutex_unlock(&shmem_swaplist_mutex);
error = shmem_replace_page(pagep, gfp, info, index);
mutex_lock(&shmem_swaplist_mutex);
/*
* We needed to drop mutex to make that restrictive page
* allocation, but the inode might have been freed while we
* dropped it: although a racing shmem_evict_inode() cannot
* complete without emptying the radix_tree, our page lock
* on this swapcache page is not enough to prevent that -
* free_swap_and_cache() of our swap entry will only
		 * trylock_page(), removing swap from the radix_tree regardless.
*
* We must not proceed to shmem_add_to_page_cache() if the
* inode has been freed, but of course we cannot rely on
* inode or mapping or info to check that. However, we can
* safely check if our swap entry is still in use (and here
* it can't have got reused for another page): if it's still
* in use, then the inode cannot have been freed yet, and we
* can safely proceed (if it's no longer in use, that tells
* nothing about the inode, but we don't need to unuse swap).
*/
if (!page_swapcount(*pagep))
error = -ENOENT;
}
/*
* We rely on shmem_swaplist_mutex, not only to protect the swaplist,
* but also to hold up shmem_evict_inode(): so inode cannot be freed
* beneath us (pagelock doesn't help until the page is in pagecache).
*/
if (!error)
error = shmem_add_to_page_cache(*pagep, mapping, index,
GFP_NOWAIT, radswap);
if (error != -ENOMEM) {
/*
* Truncation and eviction use free_swap_and_cache(), which
		 * only does trylock_page(): if we raced, best to clean up here.
*/
delete_from_swap_cache(*pagep);
set_page_dirty(*pagep);
if (!error) {
spin_lock(&info->lock);
info->swapped--;
spin_unlock(&info->lock);
swap_free(swap);
}
error = 1; /* not an error, but entry was found */
}
return error;
}
/*
* Search through swapped inodes to find and replace swap by page.
*/
int shmem_unuse(swp_entry_t swap, struct page *page)
{
struct list_head *this, *next;
struct shmem_inode_info *info;
int found = 0;
int error = 0;
/*
* There's a faint possibility that swap page was replaced before
* caller locked it: caller will come back later with the right page.
*/
if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
goto out;
/*
* Charge page using GFP_KERNEL while we can wait, before taking
* the shmem_swaplist_mutex which might hold up shmem_writepage().
* Charged back to the user (not to caller) when swap account is used.
*/
error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
if (error)
goto out;
/* No radix_tree_preload: swap entry keeps a place for page in tree */
mutex_lock(&shmem_swaplist_mutex);
list_for_each_safe(this, next, &shmem_swaplist) {
info = list_entry(this, struct shmem_inode_info, swaplist);
if (info->swapped)
found = shmem_unuse_inode(info, swap, &page);
else
list_del_init(&info->swaplist);
cond_resched();
if (found)
break;
}
mutex_unlock(&shmem_swaplist_mutex);
if (found < 0)
error = found;
out:
unlock_page(page);
page_cache_release(page);
return error;
}
/*
* Move the page from the page cache to the swap cache.
*/
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
struct shmem_inode_info *info;
struct address_space *mapping;
struct inode *inode;
swp_entry_t swap;
pgoff_t index;
BUG_ON(!PageLocked(page));
mapping = page->mapping;
index = page->index;
inode = mapping->host;
info = SHMEM_I(inode);
if (info->flags & VM_LOCKED)
goto redirty;
if (!total_swap_pages)
goto redirty;
/*
* shmem_backing_dev_info's capabilities prevent regular writeback or
* sync from ever calling shmem_writepage; but a stacking filesystem
* might use ->writepage of its underlying filesystem, in which case
* tmpfs should write out to swap only in response to memory pressure,
* and not for the writeback threads or sync.
*/
if (!wbc->for_reclaim) {
WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
goto redirty;
}
/*
* This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
* value into swapfile.c, the only way we can correctly account for a
* fallocated page arriving here is now to initialize it and write it.
*
* That's okay for a page already fallocated earlier, but if we have
* not yet completed the fallocation, then (a) we want to keep track
* of this page in case we have to undo it, and (b) it may not be a
* good idea to continue anyway, once we're pushing into swap. So
* reactivate the page, and let shmem_fallocate() quit when too many.
*/
if (!PageUptodate(page)) {
if (inode->i_private) {
struct shmem_falloc *shmem_falloc;
spin_lock(&inode->i_lock);
shmem_falloc = inode->i_private;
if (shmem_falloc &&
index >= shmem_falloc->start &&
index < shmem_falloc->next)
shmem_falloc->nr_unswapped++;
else
shmem_falloc = NULL;
spin_unlock(&inode->i_lock);
if (shmem_falloc)
goto redirty;
}
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
}
swap = get_swap_page();
if (!swap.val)
goto redirty;
/*
* Add inode to shmem_unuse()'s list of swapped-out inodes,
* if it's not already there. Do it now before the page is
* moved to swap cache, when its pagelock no longer protects
* the inode from eviction. But don't unlock the mutex until
* we've incremented swapped, because shmem_unuse_inode() will
* prune a !swapped inode from the swaplist under this mutex.
*/
mutex_lock(&shmem_swaplist_mutex);
if (list_empty(&info->swaplist))
list_add_tail(&info->swaplist, &shmem_swaplist);
if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
swap_shmem_alloc(swap);
shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));
spin_lock(&info->lock);
info->swapped++;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
mutex_unlock(&shmem_swaplist_mutex);
BUG_ON(page_mapped(page));
swap_writepage(page, wbc);
return 0;
}
mutex_unlock(&shmem_swaplist_mutex);
swapcache_free(swap, NULL);
redirty:
set_page_dirty(page);
if (wbc->for_reclaim)
return AOP_WRITEPAGE_ACTIVATE; /* Return with page locked */
unlock_page(page);
return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
char buffer[64];
if (!mpol || mpol->mode == MPOL_DEFAULT)
return; /* show nothing */
mpol_to_str(buffer, sizeof(buffer), mpol);
seq_printf(seq, ",mpol=%s", buffer);
}
static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
struct mempolicy *mpol = NULL;
if (sbinfo->mpol) {
spin_lock(&sbinfo->stat_lock); /* prevent replace/use races */
mpol = sbinfo->mpol;
mpol_get(mpol);
spin_unlock(&sbinfo->stat_lock);
}
return mpol;
}
#endif /* CONFIG_TMPFS */
static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
page = swapin_readahead(swap, gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
}
static struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct vm_area_struct pvma;
struct page *page;
/* Create a pseudo vma that just contains the policy */
pvma.vm_start = 0;
/* Bias interleave by inode number to distribute better across nodes */
pvma.vm_pgoff = index + info->vfs_inode.i_ino;
pvma.vm_ops = NULL;
pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
page = alloc_page_vma(gfp, &pvma, 0);
/* Drop reference taken by mpol_shared_policy_lookup() */
mpol_cond_put(pvma.vm_policy);
return page;
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */
static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
return swapin_readahead(swap, gfp, NULL, 0);
}
static inline struct page *shmem_alloc_page(gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */
#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
return NULL;
}
#endif
/*
* When a page is moved from swapcache to shmem filecache (either by the
* usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
* shmem_unuse_inode()), it may have been read in earlier from swap, in
* ignorance of the mapping it belongs to. If that mapping has special
* constraints (like the gma500 GEM driver, which requires RAM below 4GB),
* we may need to copy to a suitable page before moving to filecache.
*
* In a future release, this may well be extended to respect cpuset and
* NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
* but for now it is a simple matter of zone.
*/
static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
{
return page_zonenum(page) > gfp_zone(gfp);
}
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
struct shmem_inode_info *info, pgoff_t index)
{
struct page *oldpage, *newpage;
struct address_space *swap_mapping;
pgoff_t swap_index;
int error;
oldpage = *pagep;
swap_index = page_private(oldpage);
swap_mapping = page_mapping(oldpage);
/*
* We have arrived here because our zones are constrained, so don't
* limit chance of success by further cpuset and node constraints.
*/
gfp &= ~GFP_CONSTRAINT_MASK;
newpage = shmem_alloc_page(gfp, info, index);
if (!newpage)
return -ENOMEM;
page_cache_get(newpage);
copy_highpage(newpage, oldpage);
flush_dcache_page(newpage);
__set_page_locked(newpage);
SetPageUptodate(newpage);
SetPageSwapBacked(newpage);
set_page_private(newpage, swap_index);
SetPageSwapCache(newpage);
/*
* Our caller will very soon move newpage out of swapcache, but it's
* a nice clean interface for us to replace oldpage by newpage there.
*/
spin_lock_irq(&swap_mapping->tree_lock);
error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
newpage);
if (!error) {
__inc_zone_page_state(newpage, NR_FILE_PAGES);
__dec_zone_page_state(oldpage, NR_FILE_PAGES);
}
spin_unlock_irq(&swap_mapping->tree_lock);
if (unlikely(error)) {
/*
* Is this possible? I think not, now that our callers check
* both PageSwapCache and page_private after getting page lock;
* but be defensive. Reverse old to newpage for clear and free.
*/
oldpage = newpage;
} else {
mem_cgroup_replace_page_cache(oldpage, newpage);
lru_cache_add_anon(newpage);
*pagep = newpage;
}
ClearPageSwapCache(oldpage);
set_page_private(oldpage, 0);
unlock_page(oldpage);
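	/*
	 * Two releases are needed here: one for the swapcache reference
	 * just transferred to newpage in the radix_tree, and one for the
	 * reference our caller is giving up (it holds newpage instead).
	 * In the unlikely error case above, oldpage was reset to newpage,
	 * so the same two releases free the unused newpage.
	 */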
page_cache_release(oldpage);
page_cache_release(oldpage);
return error;
}
/*
* shmem_getpage_gfp - find page in cache, or get from swap, or allocate
*
* If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, since we also free the swap
 * entry: a page cannot live in both the swap cache and the page cache.
*/
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
struct address_space *mapping = inode->i_mapping;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo;
struct page *page;
swp_entry_t swap;
int error;
int once = 0;
int alloced = 0;
if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
return -EFBIG;
repeat:
swap.val = 0;
page = find_lock_page(mapping, index);
if (radix_tree_exceptional_entry(page)) {
swap = radix_to_swp_entry(page);
page = NULL;
}
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
goto failed;
}
/* fallocated page? */
if (page && !PageUptodate(page)) {
if (sgp != SGP_READ)
goto clear;
unlock_page(page);
page_cache_release(page);
page = NULL;
}
if (page || (sgp == SGP_READ && !swap.val)) {
*pagep = page;
return 0;
}
/*
* Fast cache lookup did not find it:
* bring it back from swap or allocate.
*/
info = SHMEM_I(inode);
sbinfo = SHMEM_SB(inode->i_sb);
if (swap.val) {
		/* Look it up and read it in. */
page = lookup_swap_cache(swap);
if (!page) {
/* here we actually do the io */
if (fault_type)
*fault_type |= VM_FAULT_MAJOR;
page = shmem_swapin(swap, gfp, info, index);
if (!page) {
error = -ENOMEM;
goto failed;
}
}
/* We have to do this with page locked to prevent races */
lock_page(page);
if (!PageSwapCache(page) || page_private(page) != swap.val ||
!shmem_confirm_swap(mapping, index, swap)) {
error = -EEXIST; /* try again */
goto unlock;
}
if (!PageUptodate(page)) {
error = -EIO;
goto failed;
}
wait_on_page_writeback(page);
if (shmem_should_replace_page(page, gfp)) {
error = shmem_replace_page(&page, gfp, info, index);
if (error)
goto failed;
}
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, swp_to_radix_entry(swap));
/*
* We already confirmed swap under page lock, and make
* no memory allocation here, so usually no possibility
* of error; but free_swap_and_cache() only trylocks a
* page, so it is just possible that the entry has been
* truncated or holepunched since swap was confirmed.
* shmem_undo_range() will have done some of the
* unaccounting, now delete_from_swap_cache() will do
* the rest (including mem_cgroup_uncharge_swapcache).
* Reset swap.val? No, leave it so "failed" goes back to
* "repeat": reading a hole and writing should succeed.
*/
if (error)
delete_from_swap_cache(page);
}
if (error)
goto failed;
spin_lock(&info->lock);
info->swapped--;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
delete_from_swap_cache(page);
set_page_dirty(page);
swap_free(swap);
} else {
if (shmem_acct_block(info->flags)) {
error = -ENOSPC;
goto failed;
}
if (sbinfo->max_blocks) {
if (percpu_counter_compare(&sbinfo->used_blocks,
sbinfo->max_blocks) >= 0) {
error = -ENOSPC;
goto unacct;
}
percpu_counter_inc(&sbinfo->used_blocks);
}
page = shmem_alloc_page(gfp, info, index);
if (!page) {
error = -ENOMEM;
goto decused;
}
SetPageSwapBacked(page);
__set_page_locked(page);
error = mem_cgroup_cache_charge(page, current->mm,
gfp & GFP_RECLAIM_MASK);
if (error)
goto decused;
error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
if (!error) {
error = shmem_add_to_page_cache(page, mapping, index,
gfp, NULL);
radix_tree_preload_end();
}
if (error) {
mem_cgroup_uncharge_cache_page(page);
goto decused;
}
lru_cache_add_anon(page);
spin_lock(&info->lock);
info->alloced++;
inode->i_blocks += BLOCKS_PER_PAGE;
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
alloced = true;
/*
* Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
*/
if (sgp == SGP_FALLOC)
sgp = SGP_WRITE;
clear:
/*
* Let SGP_WRITE caller clear ends if write does not fill page;
* but SGP_FALLOC on a page fallocated earlier must initialize
* it now, lest undo on failure cancel our earlier guarantee.
*/
if (sgp != SGP_WRITE) {
clear_highpage(page);
flush_dcache_page(page);
SetPageUptodate(page);
}
if (sgp == SGP_DIRTY)
set_page_dirty(page);
}
/* Perhaps the file has been truncated since we checked */
if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
error = -EINVAL;
if (alloced)
goto trunc;
else
goto failed;
}
*pagep = page;
return 0;
/*
* Error recovery.
*/
trunc:
info = SHMEM_I(inode);
ClearPageDirty(page);
delete_from_page_cache(page);
spin_lock(&info->lock);
info->alloced--;
inode->i_blocks -= BLOCKS_PER_PAGE;
spin_unlock(&info->lock);
decused:
sbinfo = SHMEM_SB(inode->i_sb);
if (sbinfo->max_blocks)
percpu_counter_add(&sbinfo->used_blocks, -1);
unacct:
shmem_unacct_blocks(info->flags, 1);
failed:
if (swap.val && error != -EINVAL &&
!shmem_confirm_swap(mapping, index, swap))
error = -EEXIST;
unlock:
if (page) {
unlock_page(page);
page_cache_release(page);
}
if (error == -ENOSPC && !once++) {
info = SHMEM_I(inode);
spin_lock(&info->lock);
shmem_recalc_inode(inode);
spin_unlock(&info->lock);
goto repeat;
}
if (error == -EEXIST) /* from above or from radix_tree_insert */
goto repeat;
return error;
}
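/*
 * Fault handler for tmpfs mappings: find or create the page via
 * shmem_getpage(), and return it still locked (VM_FAULT_LOCKED).
 */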
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
int error;
int ret = VM_FAULT_LOCKED;
error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
if (error)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
if (ret & VM_FAULT_MAJOR) {
count_vm_event(PGMAJFAULT);
mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
}
return ret;
}
#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}
static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
unsigned long addr)
{
struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
pgoff_t index;
index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif
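/*
 * SysV SHM_LOCK and SHM_UNLOCK: mark this file's pages unevictable (or
 * evictable again), accounting the locked size against the user's
 * RLIMIT_MEMLOCK via user_shm_lock().
 */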
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct shmem_inode_info *info = SHMEM_I(inode);
int retval = -ENOMEM;
spin_lock(&info->lock);
if (lock && !(info->flags & VM_LOCKED)) {
if (!user_shm_lock(inode->i_size, user))
goto out_nomem;
info->flags |= VM_LOCKED;
mapping_set_unevictable(file->f_mapping);
}
if (!lock && (info->flags & VM_LOCKED) && user) {
user_shm_unlock(inode->i_size, user);
info->flags &= ~VM_LOCKED;
mapping_clear_unevictable(file->f_mapping);
}
retval = 0;
out_nomem:
spin_unlock(&info->lock);
return retval;
}
static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
file_accessed(file);
vma->vm_ops = &shmem_vm_ops;
return 0;
}
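/*
 * Allocate a new inode, charging it against the superblock's inode limit,
 * and initialize it according to the file type in @mode.
 */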
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev, unsigned long flags)
{
struct inode *inode;
struct shmem_inode_info *info;
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
if (shmem_reserve_inode(sb))
return NULL;
inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_generation = get_seconds();
info = SHMEM_I(inode);
memset(info, 0, (char *)inode - (char *)info);
spin_lock_init(&info->lock);
info->flags = flags & VM_NORESERVE;
INIT_LIST_HEAD(&info->swaplist);
simple_xattrs_init(&info->xattrs);
cache_no_acl(inode);
switch (mode & S_IFMT) {
default:
inode->i_op = &shmem_special_inode_operations;
init_special_inode(inode, mode, dev);
break;
case S_IFREG:
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_inode_operations;
inode->i_fop = &shmem_file_operations;
mpol_shared_policy_init(&info->policy,
shmem_get_sbmpol(sbinfo));
break;
case S_IFDIR:
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
inode->i_size = 2 * BOGO_DIRENT_SIZE;
inode->i_op = &shmem_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
break;
case S_IFLNK:
/*
* Must not load anything in the rbtree,
* mpol_free_shared_policy will not be called.
*/
mpol_shared_policy_init(&info->policy, NULL);
break;
}
} else
shmem_free_inode(sb);
return inode;
}
#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_short_symlink_operations;
#ifdef CONFIG_TMPFS_XATTR
static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
#else
#define shmem_initxattrs NULL
#endif
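/*
 * tmpfs ->write_begin just finds or allocates the page and returns it
 * locked; shmem_write_end below zeroes any unwritten part of a new page,
 * marks it uptodate and dirty, and updates i_size.
 */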
static int
shmem_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = mapping->host;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}
static int
shmem_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
struct inode *inode = mapping->host;
if (pos + copied > inode->i_size)
i_size_write(inode, pos + copied);
if (!PageUptodate(page)) {
if (copied < PAGE_CACHE_SIZE) {
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
zero_user_segments(page, 0, from,
from + copied, PAGE_CACHE_SIZE);
}
SetPageUptodate(page);
}
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
return copied;
}
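/*
 * Copy data from the page cache out through the read actor.  Holes cost
 * nothing here: the zero page is substituted, unless we are reading on
 * behalf of a stacking filesystem (SGP_DIRTY), which must allocate.
 */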
static void do_shmem_file_read(struct file *filp, loff_t *ppos,
			       read_descriptor_t *desc, read_actor_t actor)
{
struct inode *inode = filp->f_path.dentry->d_inode;
struct address_space *mapping = inode->i_mapping;
pgoff_t index;
unsigned long offset;
enum sgp_type sgp = SGP_READ;
/*
* Might this read be for a stacking filesystem? Then when reading
* holes of a sparse file, we actually need to allocate those pages,
* and even mark them dirty, so it cannot exceed the max_blocks limit.
*/
if (segment_eq(get_fs(), KERNEL_DS))
sgp = SGP_DIRTY;
index = *ppos >> PAGE_CACHE_SHIFT;
offset = *ppos & ~PAGE_CACHE_MASK;
for (;;) {
struct page *page = NULL;
pgoff_t end_index;
unsigned long nr, ret;
loff_t i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
if (index > end_index)
break;
if (index == end_index) {
nr = i_size & ~PAGE_CACHE_MASK;
if (nr <= offset)
break;
}
desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
if (desc->error) {
if (desc->error == -EINVAL)
desc->error = 0;
break;
}
if (page)
unlock_page(page);
/*
		 * We must re-evaluate i_size after getting the page, since
		 * reads (unlike writes) are called without i_mutex
		 * protection against truncate.
*/
nr = PAGE_CACHE_SIZE;
i_size = i_size_read(inode);
end_index = i_size >> PAGE_CACHE_SHIFT;
if (index == end_index) {
nr = i_size & ~PAGE_CACHE_MASK;
if (nr <= offset) {
if (page)
page_cache_release(page);
break;
}
}
nr -= offset;
if (page) {
/*
* If users can be writing to this page using arbitrary
* virtual addresses, take care about potential aliasing
* before reading the page on the kernel side.
*/
if (mapping_writably_mapped(mapping))
flush_dcache_page(page);
/*
* Mark the page accessed if we read the beginning.
*/
if (!offset)
mark_page_accessed(page);
} else {
page = ZERO_PAGE(0);
page_cache_get(page);
}
/*
* Ok, we have the page, and it's up-to-date, so
* now we can copy it to user space...
*
		 * The actor routine returns how many bytes were actually used.
* NOTE! This may not be the same as how much of a user buffer
* we filled up (we may be padding etc), so we can only update
* "pos" here (the actor routine has to update the user buffer
* pointers and the remaining count).
*/
ret = actor(desc, page, offset, nr);
offset += ret;
index += offset >> PAGE_CACHE_SHIFT;
offset &= ~PAGE_CACHE_MASK;
page_cache_release(page);
if (ret != nr || !desc->count)
break;
cond_resched();
}
*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
file_accessed(filp);
}
static ssize_t shmem_file_aio_read(struct kiocb *iocb,
const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
struct file *filp = iocb->ki_filp;
ssize_t retval;
unsigned long seg;
size_t count;
loff_t *ppos = &iocb->ki_pos;
retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
if (retval)
return retval;
for (seg = 0; seg < nr_segs; seg++) {
read_descriptor_t desc;
desc.written = 0;
desc.arg.buf = iov[seg].iov_base;
desc.count = iov[seg].iov_len;
if (desc.count == 0)
continue;
desc.error = 0;
do_shmem_file_read(filp, ppos, &desc, file_read_actor);
retval += desc.written;
if (desc.error) {
retval = retval ?: desc.error;
break;
}
if (desc.count > 0)
break;
}
return retval;
}
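/*
 * splice from a tmpfs file: like generic_file_splice_read(), but uses
 * shmem_getpage() so that holes and swapped-out pages are handled.
 */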
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags)
{
struct address_space *mapping = in->f_mapping;
struct inode *inode = mapping->host;
unsigned int loff, nr_pages, req_pages;
struct page *pages[PIPE_DEF_BUFFERS];
struct partial_page partial[PIPE_DEF_BUFFERS];
struct page *page;
pgoff_t index, end_index;
loff_t isize, left;
int error, page_nr;
struct splice_pipe_desc spd = {
.pages = pages,
.partial = partial,
.nr_pages_max = PIPE_DEF_BUFFERS,
.flags = flags,
.ops = &page_cache_pipe_buf_ops,
.spd_release = spd_release_page,
};
isize = i_size_read(inode);
if (unlikely(*ppos >= isize))
return 0;
left = isize - *ppos;
if (unlikely(left < len))
len = left;
if (splice_grow_spd(pipe, &spd))
return -ENOMEM;
index = *ppos >> PAGE_CACHE_SHIFT;
loff = *ppos & ~PAGE_CACHE_MASK;
req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nr_pages = min(req_pages, pipe->buffers);
spd.nr_pages = find_get_pages_contig(mapping, index,
nr_pages, spd.pages);
index += spd.nr_pages;
error = 0;
while (spd.nr_pages < nr_pages) {
error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
if (error)
break;
unlock_page(page);
spd.pages[spd.nr_pages++] = page;
index++;
}
index = *ppos >> PAGE_CACHE_SHIFT;
nr_pages = spd.nr_pages;
spd.nr_pages = 0;
for (page_nr = 0; page_nr < nr_pages; page_nr++) {
unsigned int this_len;
if (!len)
break;
this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
page = spd.pages[page_nr];
if (!PageUptodate(page) || page->mapping != mapping) {
error = shmem_getpage(inode, index, &page,
SGP_CACHE, NULL);
if (error)
break;
unlock_page(page);
page_cache_release(spd.pages[page_nr]);
spd.pages[page_nr] = page;
}
isize = i_size_read(inode);
end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
if (unlikely(!isize || index > end_index))
break;
if (end_index == index) {
unsigned int plen;
plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
if (plen <= loff)
break;
this_len = min(this_len, plen - loff);
len = this_len;
}
spd.partial[page_nr].offset = loff;
spd.partial[page_nr].len = this_len;
len -= this_len;
loff = 0;
spd.nr_pages++;
index++;
}
while (page_nr < nr_pages)
page_cache_release(spd.pages[page_nr++]);
if (spd.nr_pages)
error = splice_to_pipe(pipe, &spd);
splice_shrink_spd(&spd);
if (error > 0) {
*ppos += error;
file_accessed(in);
}
return error;
}
/*
* llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
*/
static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
pgoff_t index, pgoff_t end, int whence)
{
struct page *page;
struct pagevec pvec;
pgoff_t indices[PAGEVEC_SIZE];
bool done = false;
int i;
pagevec_init(&pvec, 0);
pvec.nr = 1; /* start small: we may be there already */
while (!done) {
pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
pvec.nr, pvec.pages, indices);
if (!pvec.nr) {
if (whence == SEEK_DATA)
index = end;
break;
}
for (i = 0; i < pvec.nr; i++, index++) {
if (index < indices[i]) {
if (whence == SEEK_HOLE) {
done = true;
break;
}
index = indices[i];
}
page = pvec.pages[i];
if (page && !radix_tree_exceptional_entry(page)) {
if (!PageUptodate(page))
page = NULL;
}
if (index >= end ||
(page && whence == SEEK_DATA) ||
(!page && whence == SEEK_HOLE)) {
done = true;
break;
}
}
shmem_deswap_pagevec(&pvec);
pagevec_release(&pvec);
pvec.nr = PAGEVEC_SIZE;
cond_resched();
}
return index;
}
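/*
 * llseek: SEEK_DATA and SEEK_HOLE are resolved page by page through the
 * radix_tree above; other whence values use generic_file_llseek_size().
 */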
static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
{
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
pgoff_t start, end;
loff_t new_offset;
if (whence != SEEK_DATA && whence != SEEK_HOLE)
return generic_file_llseek_size(file, offset, whence,
MAX_LFS_FILESIZE, i_size_read(inode));
mutex_lock(&inode->i_mutex);
/* We're holding i_mutex so we can access i_size directly */
if (offset < 0)
offset = -EINVAL;
else if (offset >= inode->i_size)
offset = -ENXIO;
else {
start = offset >> PAGE_CACHE_SHIFT;
end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
new_offset = shmem_seek_hole_data(mapping, start, end, whence);
new_offset <<= PAGE_CACHE_SHIFT;
if (new_offset > offset) {
if (new_offset < inode->i_size)
offset = new_offset;
else if (whence == SEEK_DATA)
offset = -ENXIO;
else
offset = inode->i_size;
}
}
if (offset >= 0 && offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
}
mutex_unlock(&inode->i_mutex);
return offset;
}
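/*
 * Preallocate pages (or punch a hole) in a tmpfs file.  Progress is
 * published in inode->i_private so that shmem_writepage() can tell an
 * in-flight fallocate to back off under memory pressure.
 */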
static long shmem_fallocate(struct file *file, int mode, loff_t offset,
loff_t len)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
struct shmem_falloc shmem_falloc;
pgoff_t start, index, end;
int error;
mutex_lock(&inode->i_mutex);
if (mode & FALLOC_FL_PUNCH_HOLE) {
struct address_space *mapping = file->f_mapping;
loff_t unmap_start = round_up(offset, PAGE_SIZE);
loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
if ((u64)unmap_end > (u64)unmap_start)
unmap_mapping_range(mapping, unmap_start,
1 + unmap_end - unmap_start, 0);
shmem_truncate_range(inode, offset, offset + len - 1);
/* No need to unmap again: hole-punching leaves COWed pages */
error = 0;
goto out;
}
/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
error = inode_newsize_ok(inode, offset + len);
if (error)
goto out;
start = offset >> PAGE_CACHE_SHIFT;
end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
/* Try to avoid a swapstorm if len is impossible to satisfy */
if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
error = -ENOSPC;
goto out;
}
shmem_falloc.start = start;
shmem_falloc.next = start;
shmem_falloc.nr_falloced = 0;
shmem_falloc.nr_unswapped = 0;
spin_lock(&inode->i_lock);
inode->i_private = &shmem_falloc;
spin_unlock(&inode->i_lock);
for (index = start; index < end; index++) {
struct page *page;
/*
* Good, the fallocate(2) manpage permits EINTR: we may have
* been interrupted because we are using up too much memory.
*/
if (signal_pending(current))
error = -EINTR;
else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
error = -ENOMEM;
else
error = shmem_getpage(inode, index, &page, SGP_FALLOC,
NULL);
if (error) {
/* Remove the !PageUptodate pages we added */
shmem_undo_range(inode,
(loff_t)start << PAGE_CACHE_SHIFT,
(loff_t)index << PAGE_CACHE_SHIFT, true);
goto undone;
}
/*
* Inform shmem_writepage() how far we have reached.
* No need for lock or barrier: we have the page lock.
*/
shmem_falloc.next++;
if (!PageUptodate(page))
shmem_falloc.nr_falloced++;
/*
* If !PageUptodate, leave it that way so that freeable pages
* can be recognized if we need to rollback on error later.
* But set_page_dirty so that memory pressure will swap rather
* than free the pages we are allocating (and SGP_CACHE pages
* might still be clean: we now need to mark those dirty too).
*/
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
cond_resched();
}
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
inode->i_ctime = CURRENT_TIME;
undone:
spin_lock(&inode->i_lock);
inode->i_private = NULL;
spin_unlock(&inode->i_lock);
out:
mutex_unlock(&inode->i_mutex);
return error;
}
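/*
 * Report block and inode usage; an unlimited instance leaves the
 * corresponding fields zero, as simple_statfs() does.
 */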
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
buf->f_type = TMPFS_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX;
if (sbinfo->max_blocks) {
buf->f_blocks = sbinfo->max_blocks;
buf->f_bavail =
buf->f_bfree = sbinfo->max_blocks -
percpu_counter_sum(&sbinfo->used_blocks);
}
if (sbinfo->max_inodes) {
buf->f_files = sbinfo->max_inodes;
buf->f_ffree = sbinfo->free_inodes;
}
/* else leave those fields 0 like simple_statfs */
return 0;
}
/*
 * File creation. Allocate an inode, and we're done.
*/
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
struct inode *inode;
int error = -ENOSPC;
inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
if (inode) {
error = security_inode_init_security(inode, dir,
&dentry->d_name,
shmem_initxattrs, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
return error;
}
}
#ifdef CONFIG_TMPFS_POSIX_ACL
error = generic_acl_init(inode, dir);
if (error) {
iput(inode);
return error;
}
#else
error = 0;
#endif
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry); /* Extra count - pin the dentry in core */
}
return error;
}
static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int error;
if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
return error;
inc_nlink(dir);
return 0;
}
static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}
/*
 * Link a file.
*/
static int shmem_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
struct inode *inode = old_dentry->d_inode;
int ret;
/*
* No ordinary (disk based) filesystem counts links as inodes;
* but each new link needs a new dentry, pinning lowmem, and
* tmpfs dentries cannot be pruned until they are unlinked.
*/
ret = shmem_reserve_inode(inode->i_sb);
if (ret)
goto out;
dir->i_size += BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
inc_nlink(inode);
ihold(inode); /* New dentry reference */
dget(dentry); /* Extra pinning count for the created dentry */
d_instantiate(dentry, inode);
out:
return ret;
}
static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
shmem_free_inode(inode->i_sb);
dir->i_size -= BOGO_DIRENT_SIZE;
inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
drop_nlink(inode);
dput(dentry); /* Undo the count from "create" - this does all the work */
return 0;
}
static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
if (!simple_empty(dentry))
return -ENOTEMPTY;
drop_nlink(dentry->d_inode);
drop_nlink(dir);
return shmem_unlink(dir, dentry);
}
/*
 * The VFS layer already does all the dentry stuff for rename;
 * we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
* gets overwritten.
*/
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry)
{
struct inode *inode = old_dentry->d_inode;
int they_are_dirs = S_ISDIR(inode->i_mode);
if (!simple_empty(new_dentry))
return -ENOTEMPTY;
if (new_dentry->d_inode) {
(void) shmem_unlink(new_dir, new_dentry);
if (they_are_dirs)
drop_nlink(old_dir);
} else if (they_are_dirs) {
drop_nlink(old_dir);
inc_nlink(new_dir);
}
old_dir->i_size -= BOGO_DIRENT_SIZE;
new_dir->i_size += BOGO_DIRENT_SIZE;
old_dir->i_ctime = old_dir->i_mtime =
new_dir->i_ctime = new_dir->i_mtime =
inode->i_ctime = CURRENT_TIME;
return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
int error;
int len;
struct inode *inode;
struct page *page;
char *kaddr;
struct shmem_inode_info *info;
len = strlen(symname) + 1;
if (len > PAGE_CACHE_SIZE)
return -ENAMETOOLONG;
inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
if (!inode)
return -ENOSPC;
error = security_inode_init_security(inode, dir, &dentry->d_name,
shmem_initxattrs, NULL);
if (error) {
if (error != -EOPNOTSUPP) {
iput(inode);
return error;
}
error = 0;
}
info = SHMEM_I(inode);
inode->i_size = len-1;
if (len <= SHORT_SYMLINK_LEN) {
info->symlink = kmemdup(symname, len, GFP_KERNEL);
if (!info->symlink) {
iput(inode);
return -ENOMEM;
}
inode->i_op = &shmem_short_symlink_operations;
} else {
error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
if (error) {
iput(inode);
return error;
}
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len);
kunmap_atomic(kaddr);
SetPageUptodate(page);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
}
dir->i_size += BOGO_DIRENT_SIZE;
dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d_instantiate(dentry, inode);
dget(dentry);
return 0;
}
static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
{
nd_set_link(nd, SHMEM_I(dentry->d_inode)->symlink);
return NULL;
}
static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct page *page = NULL;
int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
if (page)
unlock_page(page);
return page;
}
static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
if (!IS_ERR(nd_get_link(nd))) {
struct page *page = cookie;
kunmap(page);
mark_page_accessed(page);
page_cache_release(page);
}
}
#ifdef CONFIG_TMPFS_XATTR
/*
* Superblocks without xattr inode operations may get some security.* xattr
* support from the LSM "for free". As soon as we have any other xattrs
* like ACLs, we also need to implement the security.* handlers at
* filesystem level, though.
*/
/*
* Callback for security_inode_init_security() for acquiring xattrs.
*/
static int shmem_initxattrs(struct inode *inode,
const struct xattr *xattr_array,
void *fs_info)
{
struct shmem_inode_info *info = SHMEM_I(inode);
const struct xattr *xattr;
struct simple_xattr *new_xattr;
size_t len;
for (xattr = xattr_array; xattr->name != NULL; xattr++) {
new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
if (!new_xattr)
return -ENOMEM;
len = strlen(xattr->name) + 1;
new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
GFP_KERNEL);
if (!new_xattr->name) {
kfree(new_xattr);
return -ENOMEM;
}
memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN);
memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
xattr->name, len);
simple_xattr_list_add(&info->xattrs, new_xattr);
}
return 0;
}
static const struct xattr_handler *shmem_xattr_handlers[] = {
#ifdef CONFIG_TMPFS_POSIX_ACL
&generic_acl_access_handler,
&generic_acl_default_handler,
#endif
NULL
};
static int shmem_xattr_validate(const char *name)
{
struct { const char *prefix; size_t len; } arr[] = {
{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
};
int i;
for (i = 0; i < ARRAY_SIZE(arr); i++) {
size_t preflen = arr[i].len;
if (strncmp(name, arr[i].prefix, preflen) == 0) {
if (!name[preflen])
return -EINVAL;
return 0;
}
}
return -EOPNOTSUPP;
}
static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
void *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_getxattr(dentry, name, buffer, size);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_get(&info->xattrs, name, buffer, size);
}
static int shmem_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, size, flags);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_set(&info->xattrs, name, value, size, flags);
}
static int shmem_removexattr(struct dentry *dentry, const char *name)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
int err;
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
* for it via sb->s_xattr.
*/
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);
err = shmem_xattr_validate(name);
if (err)
return err;
return simple_xattr_remove(&info->xattrs, name);
}
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
struct shmem_inode_info *info = SHMEM_I(dentry->d_inode);
return simple_xattr_list(&info->xattrs, buffer, size);
}
#endif /* CONFIG_TMPFS_XATTR */
static const struct inode_operations shmem_short_symlink_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_short_symlink,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static const struct inode_operations shmem_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = shmem_follow_link,
.put_link = shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static struct dentry *shmem_get_parent(struct dentry *child)
{
return ERR_PTR(-ESTALE);
}
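/*
 * File handle layout (see shmem_encode_fh below): fh[0] is i_generation,
 * fh[1] the low and fh[2] the high 32 bits of i_ino; inodes are hashed
 * under i_ino + i_generation for ilookup5().
 */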
static int shmem_match(struct inode *ino, void *vfh)
{
__u32 *fh = vfh;
__u64 inum = fh[2];
inum = (inum << 32) | fh[1];
return ino->i_ino == inum && fh[0] == ino->i_generation;
}
static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
struct fid *fid, int fh_len, int fh_type)
{
struct inode *inode;
struct dentry *dentry = NULL;
u64 inum;
if (fh_len < 3)
return NULL;
inum = fid->raw[2];
inum = (inum << 32) | fid->raw[1];
inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
shmem_match, fid->raw);
if (inode) {
dentry = d_find_alias(inode);
iput(inode);
}
return dentry;
}
static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
struct inode *parent)
{
if (*len < 3) {
*len = 3;
return 255;
}
if (inode_unhashed(inode)) {
/* Unfortunately insert_inode_hash is not idempotent,
* so as we hash inodes here rather than at creation
* time, we need a lock to ensure we only try
		 * to do it once.
*/
static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
if (inode_unhashed(inode))
__insert_inode_hash(inode,
inode->i_ino + inode->i_generation);
spin_unlock(&lock);
}
fh[0] = inode->i_generation;
fh[1] = inode->i_ino;
fh[2] = ((__u64)inode->i_ino) >> 32;
*len = 3;
return 1;
}
static const struct export_operations shmem_export_ops = {
.get_parent = shmem_get_parent,
.encode_fh = shmem_encode_fh,
.fh_to_dentry = shmem_fh_to_dentry,
};
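/*
 * Parse the comma-separated mount options; on remount, the mode, uid and
 * gid options are deliberately ignored.
 */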
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
bool remount)
{
char *this_char, *value, *rest;
uid_t uid;
gid_t gid;
while (options != NULL) {
this_char = options;
for (;;) {
/*
* NUL-terminate this option: unfortunately,
* mount options form a comma-separated list,
* but mpol's nodelist may also contain commas.
*/
options = strchr(options, ',');
if (options == NULL)
break;
options++;
if (!isdigit(*options)) {
options[-1] = '\0';
break;
}
}
if (!*this_char)
continue;
if ((value = strchr(this_char,'=')) != NULL) {
*value++ = 0;
} else {
printk(KERN_ERR
"tmpfs: No value for mount option '%s'\n",
this_char);
return 1;
}
if (!strcmp(this_char,"size")) {
unsigned long long size;
size = memparse(value,&rest);
if (*rest == '%') {
size <<= PAGE_SHIFT;
size *= totalram_pages;
do_div(size, 100);
rest++;
}
if (*rest)
goto bad_val;
sbinfo->max_blocks =
DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
} else if (!strcmp(this_char,"nr_blocks")) {
sbinfo->max_blocks = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"nr_inodes")) {
sbinfo->max_inodes = memparse(value, &rest);
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"mode")) {
if (remount)
continue;
sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
if (*rest)
goto bad_val;
} else if (!strcmp(this_char,"uid")) {
if (remount)
continue;
uid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
sbinfo->uid = make_kuid(current_user_ns(), uid);
if (!uid_valid(sbinfo->uid))
goto bad_val;
} else if (!strcmp(this_char,"gid")) {
if (remount)
continue;
gid = simple_strtoul(value, &rest, 0);
if (*rest)
goto bad_val;
sbinfo->gid = make_kgid(current_user_ns(), gid);
if (!gid_valid(sbinfo->gid))
goto bad_val;
} else if (!strcmp(this_char,"mpol")) {
if (mpol_parse_str(value, &sbinfo->mpol))
goto bad_val;
} else {
printk(KERN_ERR "tmpfs: Bad mount option %s\n",
this_char);
return 1;
}
}
return 0;
bad_val:
printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
value, this_char);
return 1;
}
static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
struct shmem_sb_info config = *sbinfo;
unsigned long inodes;
int error = -EINVAL;
config.mpol = NULL;
if (shmem_parse_options(data, &config, true))
return error;
spin_lock(&sbinfo->stat_lock);
inodes = sbinfo->max_inodes - sbinfo->free_inodes;
if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
goto out;
if (config.max_inodes < inodes)
goto out;
/*
* Those tests disallow limited->unlimited while any are in use;
* but we must separately disallow unlimited->limited, because
* in that case we have no record of how much is already in use.
*/
if (config.max_blocks && !sbinfo->max_blocks)
goto out;
if (config.max_inodes && !sbinfo->max_inodes)
goto out;
error = 0;
sbinfo->max_blocks = config.max_blocks;
sbinfo->max_inodes = config.max_inodes;
sbinfo->free_inodes = config.max_inodes - inodes;
/*
* Preserve previous mempolicy unless mpol remount option was specified.
*/
if (config.mpol) {
mpol_put(sbinfo->mpol);
sbinfo->mpol = config.mpol; /* transfers initial ref */
}
out:
spin_unlock(&sbinfo->stat_lock);
return error;
}
static int shmem_show_options(struct seq_file *seq, struct dentry *root)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
if (sbinfo->max_blocks != shmem_default_max_blocks())
seq_printf(seq, ",size=%luk",
sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
if (sbinfo->max_inodes != shmem_default_max_inodes())
seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
seq_printf(seq, ",mode=%03ho", sbinfo->mode);
if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
seq_printf(seq, ",uid=%u",
from_kuid_munged(&init_user_ns, sbinfo->uid));
if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
seq_printf(seq, ",gid=%u",
from_kgid_munged(&init_user_ns, sbinfo->gid));
shmem_show_mpol(seq, sbinfo->mpol);
return 0;
}
#endif /* CONFIG_TMPFS */
static void shmem_put_super(struct super_block *sb)
{
struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
percpu_counter_destroy(&sbinfo->used_blocks);
kfree(sbinfo);
sb->s_fs_info = NULL;
}
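/*
 * Set up a tmpfs superblock: default limits, mount options, and the
 * root directory inode.
 */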
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
struct inode *inode;
struct shmem_sb_info *sbinfo;
int err = -ENOMEM;
/* Round up to L1_CACHE_BYTES to resist false sharing */
sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
L1_CACHE_BYTES), GFP_KERNEL);
if (!sbinfo)
return -ENOMEM;
sbinfo->mode = S_IRWXUGO | S_ISVTX;
sbinfo->uid = current_fsuid();
sbinfo->gid = current_fsgid();
sb->s_fs_info = sbinfo;
#ifdef CONFIG_TMPFS
/*
	 * By default we only allow half of the physical RAM per
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
if (!(sb->s_flags & MS_NOUSER)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
err = -EINVAL;
goto failed;
}
}
sb->s_export_op = &shmem_export_ops;
sb->s_flags |= MS_NOSEC;
#else
sb->s_flags |= MS_NOUSER;
#endif
spin_lock_init(&sbinfo->stat_lock);
if (percpu_counter_init(&sbinfo->used_blocks, 0))
goto failed;
sbinfo->free_inodes = sbinfo->max_inodes;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
if (!inode)
goto failed;
inode->i_uid = sbinfo->uid;
inode->i_gid = sbinfo->gid;
sb->s_root = d_make_root(inode);
if (!sb->s_root)
goto failed;
return 0;
failed:
shmem_put_super(sb);
return err;
}
static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
struct shmem_inode_info *info;
info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
if (!info)
return NULL;
return &info->vfs_inode;
}
static void shmem_destroy_callback(struct rcu_head *head)
{
struct inode *inode = container_of(head, struct inode, i_rcu);
kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void shmem_destroy_inode(struct inode *inode)
{
if (S_ISREG(inode->i_mode))
mpol_free_shared_policy(&SHMEM_I(inode)->policy);
call_rcu(&inode->i_rcu, shmem_destroy_callback);
}
static void shmem_init_inode(void *foo)
{
struct shmem_inode_info *info = foo;
inode_init_once(&info->vfs_inode);
}
static int shmem_init_inodecache(void)
{
shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
sizeof(struct shmem_inode_info),
0, SLAB_PANIC, shmem_init_inode);
return 0;
}
static void shmem_destroy_inodecache(void)
{
kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
.write_begin = shmem_write_begin,
.write_end = shmem_write_end,
#endif
.migratepage = migrate_page,
.error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
#ifdef CONFIG_TMPFS
.llseek = shmem_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
.aio_read = shmem_file_aio_read,
.aio_write = generic_file_aio_write,
.fsync = noop_fsync,
.splice_read = shmem_file_splice_read,
.splice_write = generic_file_splice_write,
.fallocate = shmem_fallocate,
#endif
};
static const struct inode_operations shmem_inode_operations = {
.setattr = shmem_setattr,
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
.create = shmem_create,
.lookup = simple_lookup,
.link = shmem_link,
.unlink = shmem_unlink,
.symlink = shmem_symlink,
.mkdir = shmem_mkdir,
.rmdir = shmem_rmdir,
.mknod = shmem_mknod,
.rename = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
.setxattr = shmem_setxattr,
.getxattr = shmem_getxattr,
.listxattr = shmem_listxattr,
.removexattr = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
.setattr = shmem_setattr,
#endif
};
static const struct super_operations shmem_ops = {
.alloc_inode = shmem_alloc_inode,
.destroy_inode = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
.statfs = shmem_statfs,
.remount_fs = shmem_remount_fs,
.show_options = shmem_show_options,
#endif
.evict_inode = shmem_evict_inode,
.drop_inode = generic_delete_inode,
.put_super = shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
.fault = shmem_fault,
#ifdef CONFIG_NUMA
.set_policy = shmem_set_policy,
.get_policy = shmem_get_policy,
#endif
.remap_pages = generic_file_remap_pages,
};
static struct dentry *shmem_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
return mount_nodev(fs_type, flags, data, shmem_fill_super);
}
static struct file_system_type shmem_fs_type = {
.owner = THIS_MODULE,
.name = "tmpfs",
.mount = shmem_mount,
.kill_sb = kill_litter_super,
};
int __init shmem_init(void)
{
int error;
error = bdi_init(&shmem_backing_dev_info);
if (error)
goto out4;
error = shmem_init_inodecache();
if (error)
goto out3;
error = register_filesystem(&shmem_fs_type);
if (error) {
printk(KERN_ERR "Could not register tmpfs\n");
goto out2;
}
shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
shmem_fs_type.name, NULL);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
goto out1;
}
return 0;
out1:
unregister_filesystem(&shmem_fs_type);
out2:
shmem_destroy_inodecache();
out3:
bdi_destroy(&shmem_backing_dev_info);
out4:
shm_mnt = ERR_PTR(error);
return error;
}
#else /* !CONFIG_SHMEM */
/*
* tiny-shmem: simple shmemfs and tmpfs using ramfs code
*
* This is intended for small systems where the benefits of the full
* shmem code (swap-backed and resource-limited) are outweighed by
* its complexity. On systems without swap this code should be
* effectively equivalent, but much lighter weight.
*/
#include <linux/ramfs.h>
static struct file_system_type shmem_fs_type = {
.name = "tmpfs",
.mount = ramfs_mount,
.kill_sb = kill_litter_super,
};
int __init shmem_init(void)
{
BUG_ON(register_filesystem(&shmem_fs_type) != 0);
shm_mnt = kern_mount(&shmem_fs_type);
BUG_ON(IS_ERR(shm_mnt));
return 0;
}
int shmem_unuse(swp_entry_t swap, struct page *page)
{
return 0;
}
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
return 0;
}
void shmem_unlock_mapping(struct address_space *mapping)
{
}
void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
truncate_inode_pages_range(inode->i_mapping, lstart, lend);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size) 0
#define shmem_unacct_size(flags, size) do {} while (0)
#endif /* CONFIG_SHMEM */
/* common code */
/**
* shmem_file_setup - get an unlinked file living in tmpfs
* @name: name for dentry (to be seen in /proc/<pid>/maps)
* @size: size to be set for the file
* @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
*/
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
int error;
struct file *file;
struct inode *inode;
struct path path;
struct dentry *root;
struct qstr this;
if (IS_ERR(shm_mnt))
return (void *)shm_mnt;
if (size < 0 || size > MAX_LFS_FILESIZE)
return ERR_PTR(-EINVAL);
if (shmem_acct_size(flags, size))
return ERR_PTR(-ENOMEM);
error = -ENOMEM;
this.name = name;
this.len = strlen(name);
this.hash = 0; /* will go */
root = shm_mnt->mnt_root;
path.dentry = d_alloc(root, &this);
if (!path.dentry)
goto put_memory;
path.mnt = mntget(shm_mnt);
error = -ENOSPC;
inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
if (!inode)
goto put_dentry;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
#ifndef CONFIG_MMU
error = ramfs_nommu_expand_for_mapping(inode, size);
if (error)
goto put_dentry;
#endif
error = -ENFILE;
file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
&shmem_file_operations);
if (!file)
goto put_dentry;
return file;
put_dentry:
path_put(&path);
put_memory:
shmem_unacct_size(flags, size);
return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
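/*
 * Illustrative sketch, not part of the original file: roughly how an
 * in-kernel user might create such an unlinked tmpfs file. The helper
 * and the "example-buf" name are made up for illustration.
 */
static inline int example_create_shmem_buffer(struct file **filpp)
{
/* 1 MiB object; passing VM_NORESERVE instead of 0 would skip pre-accounting */
struct file *filp = shmem_file_setup("example-buf", 1024 * 1024, 0);
if (IS_ERR(filp))
return PTR_ERR(filp); /* -EINVAL, -ENOMEM or -ENFILE */
*filpp = filp;
return 0;
}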
/**
* shmem_zero_setup - setup a shared anonymous mapping
* @vma: the vma to be mmapped is prepared by do_mmap_pgoff
*/
int shmem_zero_setup(struct vm_area_struct *vma)
{
struct file *file;
loff_t size = vma->vm_end - vma->vm_start;
file = shmem_file_setup("dev/zero", size, vma->vm_flags);
if (IS_ERR(file))
return PTR_ERR(file);
if (vma->vm_file)
fput(vma->vm_file);
vma->vm_file = file;
vma->vm_ops = &shmem_vm_ops;
return 0;
}
/**
* shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
* @mapping: the page's address_space
* @index: the page index
* @gfp: the page allocator flags to use if allocating
*
* This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
* with any new page allocations done using the specified allocation flags.
* But read_cache_page_gfp() uses the ->readpage() method, which does not
* suit tmpfs, since it may have pages in swapcache and needs to find those
* for itself; the drivers/gpu/drm i915 and ttm drivers rely on this support.
*
* i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
* with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
*/
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
struct inode *inode = mapping->host;
struct page *page;
int error;
BUG_ON(mapping->a_ops != &shmem_aops);
error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
if (error)
page = ERR_PTR(error);
else
unlock_page(page);
return page;
#else
/*
* The tiny !SHMEM case uses ramfs without swap
*/
return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
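/*
 * Illustrative sketch, not part of the original file: roughly how a GPU
 * driver might pull object pages through this helper, mixing
 * __GFP_NORETRY | __GFP_NOWARN into the mapping's gfp mask as the
 * comment above describes. The helper name is made up.
 */
static inline struct page *example_get_object_page(struct address_space *mapping, pgoff_t index)
{
gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
/* returns an unlocked, up-to-date page, or ERR_PTR on failure */
return shmem_read_mapping_page_gfp(mapping, index, gfp);
}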
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_5587_0 |
crossvul-cpp_data_good_2321_0 | /******************************************************************************
* emulate.c
*
* Generic x86 (32-bit and 64-bit) instruction decoder and emulator.
*
* Copyright (c) 2005 Keir Fraser
*
* Linux coding style, mod r/m decoder, segment base fixes, real-mode
* privileged instructions:
*
* Copyright (C) 2006 Qumranet
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Avi Kivity <avi@qumranet.com>
* Yaniv Kamay <yaniv@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
* From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4
*/
#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include <linux/module.h>
#include <asm/kvm_emulate.h>
#include <linux/stringify.h>
#include "x86.h"
#include "tss.h"
/*
* Operand types
*/
#define OpNone 0ull
#define OpImplicit 1ull /* No generic decode */
#define OpReg 2ull /* Register */
#define OpMem 3ull /* Memory */
#define OpAcc 4ull /* Accumulator: AL/AX/EAX/RAX */
#define OpDI 5ull /* ES:DI/EDI/RDI */
#define OpMem64 6ull /* Memory, 64-bit */
#define OpImmUByte 7ull /* Zero-extended 8-bit immediate */
#define OpDX 8ull /* DX register */
#define OpCL 9ull /* CL register (for shifts) */
#define OpImmByte 10ull /* 8-bit sign extended immediate */
#define OpOne 11ull /* Implied 1 */
#define OpImm 12ull /* Sign extended up to 32-bit immediate */
#define OpMem16 13ull /* Memory operand (16-bit). */
#define OpMem32 14ull /* Memory operand (32-bit). */
#define OpImmU 15ull /* Immediate operand, zero extended */
#define OpSI 16ull /* SI/ESI/RSI */
#define OpImmFAddr 17ull /* Immediate far address */
#define OpMemFAddr 18ull /* Far address in memory */
#define OpImmU16 19ull /* Immediate operand, 16 bits, zero extended */
#define OpES 20ull /* ES */
#define OpCS 21ull /* CS */
#define OpSS 22ull /* SS */
#define OpDS 23ull /* DS */
#define OpFS 24ull /* FS */
#define OpGS 25ull /* GS */
#define OpMem8 26ull /* 8-bit zero extended memory operand */
#define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */
#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */
#define OpAccLo 29ull /* Low part of extended acc (AX/AX/EAX/RAX) */
#define OpAccHi 30ull /* High part of extended acc (-/DX/EDX/RDX) */
#define OpBits 5 /* Width of operand field */
#define OpMask ((1ull << OpBits) - 1)
/*
* Opcode effective-address decode tables.
* Note that we only emulate instructions that have at least one memory
* operand (excluding implicit stack references). We assume that stack
* references and instruction fetches will never occur in special memory
* areas that require emulation. So, for example, 'mov <imm>,<reg>' need
* not be handled.
*/
/* Operand sizes: 8-bit operands or specified/overridden size. */
#define ByteOp (1<<0) /* 8-bit operands. */
/* Destination operand type. */
#define DstShift 1
#define ImplicitOps (OpImplicit << DstShift)
#define DstReg (OpReg << DstShift)
#define DstMem (OpMem << DstShift)
#define DstAcc (OpAcc << DstShift)
#define DstDI (OpDI << DstShift)
#define DstMem64 (OpMem64 << DstShift)
#define DstImmUByte (OpImmUByte << DstShift)
#define DstDX (OpDX << DstShift)
#define DstAccLo (OpAccLo << DstShift)
#define DstMask (OpMask << DstShift)
/* Source operand type. */
#define SrcShift 6
#define SrcNone (OpNone << SrcShift)
#define SrcReg (OpReg << SrcShift)
#define SrcMem (OpMem << SrcShift)
#define SrcMem16 (OpMem16 << SrcShift)
#define SrcMem32 (OpMem32 << SrcShift)
#define SrcImm (OpImm << SrcShift)
#define SrcImmByte (OpImmByte << SrcShift)
#define SrcOne (OpOne << SrcShift)
#define SrcImmUByte (OpImmUByte << SrcShift)
#define SrcImmU (OpImmU << SrcShift)
#define SrcSI (OpSI << SrcShift)
#define SrcXLat (OpXLat << SrcShift)
#define SrcImmFAddr (OpImmFAddr << SrcShift)
#define SrcMemFAddr (OpMemFAddr << SrcShift)
#define SrcAcc (OpAcc << SrcShift)
#define SrcImmU16 (OpImmU16 << SrcShift)
#define SrcImm64 (OpImm64 << SrcShift)
#define SrcDX (OpDX << SrcShift)
#define SrcMem8 (OpMem8 << SrcShift)
#define SrcAccHi (OpAccHi << SrcShift)
#define SrcMask (OpMask << SrcShift)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Escape (5<<15) /* Escape to coprocessor instruction */
#define Sse (1<<18) /* SSE Vector instruction */
/* Generic ModRM decode. */
#define ModRM (1<<19)
/* Destination is only written; never read. */
#define Mov (1<<20)
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define EmulateOnUD (1<<22) /* Emulate if unsupported by the host */
#define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
#define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
#define Undefined (1<<25) /* No Such Instruction */
#define Lock (1<<26) /* lock prefix is allowed for the instruction */
#define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */
#define No64 (1<<28)
#define PageTable (1 << 29) /* instruction used to write page table */
#define NotImpl (1 << 30) /* instruction is not implemented */
/* Source 2 operand type */
#define Src2Shift (31)
#define Src2None (OpNone << Src2Shift)
#define Src2Mem (OpMem << Src2Shift)
#define Src2CL (OpCL << Src2Shift)
#define Src2ImmByte (OpImmByte << Src2Shift)
#define Src2One (OpOne << Src2Shift)
#define Src2Imm (OpImm << Src2Shift)
#define Src2ES (OpES << Src2Shift)
#define Src2CS (OpCS << Src2Shift)
#define Src2SS (OpSS << Src2Shift)
#define Src2DS (OpDS << Src2Shift)
#define Src2FS (OpFS << Src2Shift)
#define Src2GS (OpGS << Src2Shift)
#define Src2Mask (OpMask << Src2Shift)
#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
#define NoWrite ((u64)1 << 45) /* No writeback */
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
#define Intercept ((u64)1 << 48) /* Has valid intercept field */
#define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
#define NoBigReal ((u64)1 << 50) /* No big real mode */
#define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
#define DstXacc (DstAccLo | SrcAccHi | SrcWrite)
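/*
 * Worked example, illustrative only (this define is not part of the
 * original table): a byte-sized ModRM-decoded move from register to
 * memory (cf. MOV r/m8, r8) packs into the flag word as 8-bit operands,
 * memory destination, register source, write-only destination.
 */
#define EXAMPLE_MOV_RM8_R8 (ByteOp | DstMem | SrcReg | ModRM | Mov)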
#define X2(x...) x, x
#define X3(x...) X2(x), x
#define X4(x...) X2(x), X2(x)
#define X5(x...) X4(x), x
#define X6(x...) X4(x), X2(x)
#define X7(x...) X4(x), X3(x)
#define X8(x...) X4(x), X4(x)
#define X16(x...) X8(x), X8(x)
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
/*
* fastop functions have a special calling convention:
*
* dst: rax (in/out)
* src: rdx (in/out)
* src2: rcx (in)
* flags: rflags (in/out)
* ex: rsi (in:fastop pointer, out:zero if exception)
*
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
* different operand sizes can be reached by calculation, rather than a jump
* table (which would be bigger than the code).
*
* fastop functions are declared as taking a never-defined fastop parameter,
* so they can't be called from C directly.
*/
struct fastop;
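/*
 * Illustrative sketch, not part of the original file: a FASTOP2() block
 * lays its byte/word/long/quad entries out back to back, FASTOP_SIZE
 * bytes apart, so the entry for an N-byte operand can be reached by
 * pointer arithmetic along these lines (the real dispatch lives in
 * fastop(), declared below).
 */
static inline void *example_fastop_entry(void *base, unsigned int bytes)
{
/* bytes is 1, 2, 4 or 8, so ilog2(bytes) is 0..3 */
return base + ilog2(bytes) * FASTOP_SIZE;
}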
struct opcode {
u64 flags : 56;
u64 intercept : 8;
union {
int (*execute)(struct x86_emulate_ctxt *ctxt);
const struct opcode *group;
const struct group_dual *gdual;
const struct gprefix *gprefix;
const struct escape *esc;
void (*fastop)(struct fastop *fake);
} u;
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
};
struct group_dual {
struct opcode mod012[8];
struct opcode mod3[8];
};
struct gprefix {
struct opcode pfx_no;
struct opcode pfx_66;
struct opcode pfx_f2;
struct opcode pfx_f3;
};
struct escape {
struct opcode op[8];
struct opcode high[64];
};
/* EFLAGS bit definitions. */
#define EFLG_ID (1<<21)
#define EFLG_VIP (1<<20)
#define EFLG_VIF (1<<19)
#define EFLG_AC (1<<18)
#define EFLG_VM (1<<17)
#define EFLG_RF (1<<16)
#define EFLG_IOPL (3<<12)
#define EFLG_NT (1<<14)
#define EFLG_OF (1<<11)
#define EFLG_DF (1<<10)
#define EFLG_IF (1<<9)
#define EFLG_TF (1<<8)
#define EFLG_SF (1<<7)
#define EFLG_ZF (1<<6)
#define EFLG_AF (1<<4)
#define EFLG_PF (1<<2)
#define EFLG_CF (1<<0)
#define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
#define EFLG_RESERVED_ONE_MASK 2
static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
if (!(ctxt->regs_valid & (1 << nr))) {
ctxt->regs_valid |= 1 << nr;
ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
}
return ctxt->_regs[nr];
}
static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
ctxt->regs_valid |= 1 << nr;
ctxt->regs_dirty |= 1 << nr;
return &ctxt->_regs[nr];
}
static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
{
reg_read(ctxt, nr);
return reg_write(ctxt, nr);
}
static void writeback_registers(struct x86_emulate_ctxt *ctxt)
{
unsigned reg;
for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
}
static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
{
ctxt->regs_dirty = 0;
ctxt->regs_valid = 0;
}
/*
* These EFLAGS bits are restored from saved value during emulation, and
* any changes are written back to the saved value after emulation.
*/
#define EFLAGS_MASK (EFLG_OF|EFLG_SF|EFLG_ZF|EFLG_AF|EFLG_PF|EFLG_CF)
#ifdef CONFIG_X86_64
#define ON64(x) x
#else
#define ON64(x)
#endif
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
#define FOP_ALIGN ".align " __stringify(FASTOP_SIZE) " \n\t"
#define FOP_RET "ret \n\t"
#define FOP_START(op) \
extern void em_##op(struct fastop *fake); \
asm(".pushsection .text, \"ax\" \n\t" \
".global em_" #op " \n\t" \
FOP_ALIGN \
"em_" #op ": \n\t"
#define FOP_END \
".popsection")
#define FOPNOP() FOP_ALIGN FOP_RET
#define FOP1E(op, dst) \
FOP_ALIGN "10: " #op " %" #dst " \n\t" FOP_RET
#define FOP1EEX(op, dst) \
FOP1E(op, dst) _ASM_EXTABLE(10b, kvm_fastop_exception)
#define FASTOP1(op) \
FOP_START(op) \
FOP1E(op##b, al) \
FOP1E(op##w, ax) \
FOP1E(op##l, eax) \
ON64(FOP1E(op##q, rax)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m) */
#define FASTOP1SRC2(op, name) \
FOP_START(name) \
FOP1E(op, cl) \
FOP1E(op, cx) \
FOP1E(op, ecx) \
ON64(FOP1E(op, rcx)) \
FOP_END
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
#define FASTOP1SRC2EX(op, name) \
FOP_START(name) \
FOP1EEX(op, cl) \
FOP1EEX(op, cx) \
FOP1EEX(op, ecx) \
ON64(FOP1EEX(op, rcx)) \
FOP_END
#define FOP2E(op, dst, src) \
FOP_ALIGN #op " %" #src ", %" #dst " \n\t" FOP_RET
#define FASTOP2(op) \
FOP_START(op) \
FOP2E(op##b, al, dl) \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, word only */
#define FASTOP2W(op) \
FOP_START(op) \
FOPNOP() \
FOP2E(op##w, ax, dx) \
FOP2E(op##l, eax, edx) \
ON64(FOP2E(op##q, rax, rdx)) \
FOP_END
/* 2 operand, src is CL */
#define FASTOP2CL(op) \
FOP_START(op) \
FOP2E(op##b, al, cl) \
FOP2E(op##w, ax, cl) \
FOP2E(op##l, eax, cl) \
ON64(FOP2E(op##q, rax, cl)) \
FOP_END
#define FOP3E(op, dst, src, src2) \
FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET
/* 3-operand, word-only, src2=cl */
#define FASTOP3WCL(op) \
FOP_START(op) \
FOPNOP() \
FOP3E(op##w, ax, dx, cl) \
FOP3E(op##l, eax, edx, cl) \
ON64(FOP3E(op##q, rax, rdx, cl)) \
FOP_END
/* Special case for SETcc - 1 instruction per cc */
#define FOP_SETCC(op) ".align 4; " #op " %al; ret \n\t"
asm(".global kvm_fastop_exception \n"
"kvm_fastop_exception: xor %esi, %esi; ret");
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
FOP_SETCC(setnc)
FOP_SETCC(setz)
FOP_SETCC(setnz)
FOP_SETCC(setbe)
FOP_SETCC(setnbe)
FOP_SETCC(sets)
FOP_SETCC(setns)
FOP_SETCC(setp)
FOP_SETCC(setnp)
FOP_SETCC(setl)
FOP_SETCC(setnl)
FOP_SETCC(setle)
FOP_SETCC(setnle)
FOP_END;
FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET
FOP_END;
static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
enum x86_intercept intercept,
enum x86_intercept_stage stage)
{
struct x86_instruction_info info = {
.intercept = intercept,
.rep_prefix = ctxt->rep_prefix,
.modrm_mod = ctxt->modrm_mod,
.modrm_reg = ctxt->modrm_reg,
.modrm_rm = ctxt->modrm_rm,
.src_val = ctxt->src.val64,
.dst_val = ctxt->dst.val64,
.src_bytes = ctxt->src.bytes,
.dst_bytes = ctxt->dst.bytes,
.ad_bytes = ctxt->ad_bytes,
.next_rip = ctxt->eip,
};
return ctxt->ops->intercept(ctxt, &info, stage);
}
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
*dest = (*dest & ~mask) | (src & mask);
}
static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
{
return (1UL << (ctxt->ad_bytes << 3)) - 1;
}
static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
{
u16 sel;
struct desc_struct ss;
if (ctxt->mode == X86EMUL_MODE_PROT64)
return ~0UL;
ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
return ~0U >> ((ss.d ^ 1) * 16); /* d=0: 0xffff; d=1: 0xffffffff */
}
static int stack_size(struct x86_emulate_ctxt *ctxt)
{
return (__fls(stack_mask(ctxt)) + 1) >> 3;
}
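/* Illustrative: a 16-bit SS yields mask 0xffff, so (__fls(0xffff) + 1) >> 3 == 2 bytes. */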
/* Access/update address held in a register, based on addressing mode. */
static inline unsigned long
address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
if (ctxt->ad_bytes == sizeof(unsigned long))
return reg;
else
return reg & ad_mask(ctxt);
}
static inline unsigned long
register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
{
return address_mask(ctxt, reg);
}
static void masked_increment(ulong *reg, ulong mask, int inc)
{
assign_masked(reg, *reg + inc, mask);
}
static inline void
register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
{
ulong mask;
if (ctxt->ad_bytes == sizeof(unsigned long))
mask = ~0UL;
else
mask = ad_mask(ctxt);
masked_increment(reg, mask, inc);
}
static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
{
masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
}
static u32 desc_limit_scaled(struct desc_struct *desc)
{
u32 limit = get_desc_limit(desc);
return desc->g ? (limit << 12) | 0xfff : limit;
}
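/*
 * Illustrative: with the granularity bit set, a raw limit of 0xfffff
 * scales to (0xfffff << 12) | 0xfff == 0xffffffff, i.e. a 4 GiB segment.
 */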
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
return 0;
return ctxt->ops->get_cached_segment_base(ctxt, seg);
}
static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
u32 error, bool valid)
{
WARN_ON(vec > 0x1f);
ctxt->exception.vector = vec;
ctxt->exception.error_code = error;
ctxt->exception.error_code_valid = valid;
return X86EMUL_PROPAGATE_FAULT;
}
static int emulate_db(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DB_VECTOR, 0, false);
}
static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, GP_VECTOR, err, true);
}
static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, SS_VECTOR, err, true);
}
static int emulate_ud(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, UD_VECTOR, 0, false);
}
static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
{
return emulate_exception(ctxt, TS_VECTOR, err, true);
}
static int emulate_de(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, DE_VECTOR, 0, false);
}
static int emulate_nm(struct x86_emulate_ctxt *ctxt)
{
return emulate_exception(ctxt, NM_VECTOR, 0, false);
}
static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
int cs_l)
{
switch (ctxt->op_bytes) {
case 2:
ctxt->_eip = (u16)dst;
break;
case 4:
ctxt->_eip = (u32)dst;
break;
case 8:
if ((cs_l && is_noncanonical_address(dst)) ||
(!cs_l && (dst & ~(u32)-1)))
return emulate_gp(ctxt, 0);
ctxt->_eip = dst;
break;
default:
WARN(1, "unsupported eip assignment size\n");
}
return X86EMUL_CONTINUE;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
}
static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{
return assign_eip_near(ctxt, ctxt->_eip + rel);
}
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
unsigned seg)
{
u16 dummy;
u32 base3;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
}
/*
* x86 defines three classes of vector instructions: explicitly
* aligned, explicitly unaligned, and the rest, which change behaviour
* depending on whether they're AVX encoded or not.
*
* Also included is CMPXCHG16B which is not a vector instruction, yet it is
* subject to the same check.
*/
static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
{
if (likely(size < 16))
return false;
if (ctxt->d & Aligned)
return true;
else if (ctxt->d & Unaligned)
return false;
else if (ctxt->d & Avx)
return false;
else
return true;
}
static int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write, bool fetch,
ulong *linear)
{
struct desc_struct desc;
bool usable;
ulong la;
u32 lim;
u16 sel;
unsigned cpl;
la = seg_base(ctxt, addr.seg) + addr.ea;
switch (ctxt->mode) {
case X86EMUL_MODE_PROT64:
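/* #GP unless the address is canonical (bits 63:48 sign-extend bit 47) */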
if (((signed long)la << 16) >> 16 != la)
return emulate_gp(ctxt, 0);
break;
default:
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
goto bad;
/* code segment in protected mode or read-only data segment */
if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8))
|| !(desc.type & 2)) && write)
goto bad;
/* unreadable code segment */
if (!fetch && (desc.type & 8) && !(desc.type & 2))
goto bad;
lim = desc_limit_scaled(&desc);
if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
(ctxt->d & NoBigReal)) {
/* la is between zero and 0xffff */
if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
goto bad;
} else if ((desc.type & 8) || !(desc.type & 4)) {
/* expand-up segment */
if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
goto bad;
} else {
/* expand-down segment */
if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
goto bad;
lim = desc.d ? 0xffffffff : 0xffff;
if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
goto bad;
}
cpl = ctxt->ops->cpl(ctxt);
if (!(desc.type & 8)) {
/* data segment */
if (cpl > desc.dpl)
goto bad;
} else if ((desc.type & 8) && !(desc.type & 4)) {
/* nonconforming code segment */
if (cpl != desc.dpl)
goto bad;
} else if ((desc.type & 8) && (desc.type & 4)) {
/* conforming code segment */
if (cpl < desc.dpl)
goto bad;
}
break;
}
if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
la &= (u32)-1;
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
return emulate_gp(ctxt, 0);
*linear = la;
return X86EMUL_CONTINUE;
bad:
if (addr.seg == VCPU_SREG_SS)
return emulate_ss(ctxt, sel);
else
return emulate_gp(ctxt, sel);
}
static int linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
unsigned size, bool write,
ulong *linear)
{
return __linearize(ctxt, addr, size, write, false, linear);
}
static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
*/
static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
{
int rc;
unsigned size;
unsigned long linear;
int cur_size = ctxt->fetch.end - ctxt->fetch.data;
struct segmented_address addr = { .seg = VCPU_SREG_CS,
.ea = ctxt->eip + cur_size };
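/* 15 is the longest x86 insn; cur_size <= 15, so 15 ^ cur_size == 15 - cur_size */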
size = 15UL ^ cur_size;
rc = __linearize(ctxt, addr, size, false, true, &linear);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
/*
* One instruction can only straddle two pages, and the first of
* those has already been loaded at the beginning of
* x86_decode_insn. So, if we still do not have enough bytes
* here, we must have hit the 15-byte instruction-length limit.
*/
if (unlikely(size < op_size))
return X86EMUL_UNHANDLEABLE;
rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
size, &ctxt->exception);
if (unlikely(rc != X86EMUL_CONTINUE))
return rc;
ctxt->fetch.end += size;
return X86EMUL_CONTINUE;
}
static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
unsigned size)
{
unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
if (unlikely(done_size < size))
return __do_insn_fetch_bytes(ctxt, size - done_size);
else
return X86EMUL_CONTINUE;
}
/* Fetch next part of the instruction being emulated. */
#define insn_fetch(_type, _ctxt) \
({ _type _x; \
\
rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += sizeof(_type); \
_x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
ctxt->fetch.ptr += sizeof(_type); \
_x; \
})
#define insn_fetch_arr(_arr, _size, _ctxt) \
({ \
rc = do_insn_fetch_bytes(_ctxt, _size); \
if (rc != X86EMUL_CONTINUE) \
goto done; \
ctxt->_eip += (_size); \
memcpy(_arr, ctxt->fetch.ptr, _size); \
ctxt->fetch.ptr += (_size); \
})
/*
* Given the 'reg' portion of a ModRM byte, and a register block, return a
* pointer into the block that addresses the relevant register.
* When @byteop is set and no REX prefix is present, registers 4..7
* decode as the high-byte registers AH, CH, DH and BH.
*/
static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
int byteop)
{
void *p;
int highbyte_regs = (ctxt->rex_prefix == 0) && byteop;
if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
else
p = reg_rmw(ctxt, modrm_reg);
return p;
}
static int read_descriptor(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
u16 *size, unsigned long *address, int op_bytes)
{
int rc;
if (op_bytes == 2)
op_bytes = 3;
*address = 0;
rc = segmented_read_std(ctxt, addr, size, 2);
if (rc != X86EMUL_CONTINUE)
return rc;
addr.ea += 2;
rc = segmented_read_std(ctxt, addr, address, op_bytes);
return rc;
}
FASTOP2(add);
FASTOP2(or);
FASTOP2(adc);
FASTOP2(sbb);
FASTOP2(and);
FASTOP2(sub);
FASTOP2(xor);
FASTOP2(cmp);
FASTOP2(test);
FASTOP1SRC2(mul, mul_ex);
FASTOP1SRC2(imul, imul_ex);
FASTOP1SRC2EX(div, div_ex);
FASTOP1SRC2EX(idiv, idiv_ex);
FASTOP3WCL(shld);
FASTOP3WCL(shrd);
FASTOP2W(imul);
FASTOP1(not);
FASTOP1(neg);
FASTOP1(inc);
FASTOP1(dec);
FASTOP2CL(rol);
FASTOP2CL(ror);
FASTOP2CL(rcl);
FASTOP2CL(rcr);
FASTOP2CL(shl);
FASTOP2CL(shr);
FASTOP2CL(sar);
FASTOP2W(bsf);
FASTOP2W(bsr);
FASTOP2W(bt);
FASTOP2W(bts);
FASTOP2W(btr);
FASTOP2W(btc);
FASTOP2(xadd);
static u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + 4 * (condition & 0xf);
flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; call *%[fastop]"
: "=a"(rc) : [fastop]"r"(fop), [flags]"r"(flags));
return rc;
}
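/*
 * Illustrative: the SETcc stubs above are 4 bytes apart, so e.g.
 * condition 0x4 ("e"/"z") lands on the setz stub at em_setcc + 4 * 4.
 */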
static void fetch_register_operand(struct operand *op)
{
switch (op->bytes) {
case 1:
op->val = *(u8 *)op->addr.reg;
break;
case 2:
op->val = *(u16 *)op->addr.reg;
break;
case 4:
op->val = *(u32 *)op->addr.reg;
break;
case 8:
op->val = *(u64 *)op->addr.reg;
break;
}
}
static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
#ifdef CONFIG_X86_64
case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
#endif
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
{
ctxt->ops->get_fpu(ctxt);
switch (reg) {
case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
default: BUG();
}
ctxt->ops->put_fpu(ctxt);
}
static int em_fninit(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fninit");
ctxt->ops->put_fpu(ctxt);
return X86EMUL_CONTINUE;
}
static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
{
u16 fcw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstcw %0": "+m"(fcw));
ctxt->ops->put_fpu(ctxt);
/* force 2 byte destination */
ctxt->dst.bytes = 2;
ctxt->dst.val = fcw;
return X86EMUL_CONTINUE;
}
static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
{
u16 fsw;
if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
return emulate_nm(ctxt);
ctxt->ops->get_fpu(ctxt);
asm volatile("fnstsw %0": "+m"(fsw));
ctxt->ops->put_fpu(ctxt);
/* force 2 byte destination */
ctxt->dst.bytes = 2;
ctxt->dst.val = fsw;
return X86EMUL_CONTINUE;
}
static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
unsigned reg = ctxt->modrm_reg;
if (!(ctxt->d & ModRM))
reg = (ctxt->b & 7) | ((ctxt->rex_prefix & 1) << 3);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = reg;
read_sse_reg(ctxt, &op->vec_val, reg);
return;
}
if (ctxt->d & Mmx) {
reg &= 7;
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = reg;
return;
}
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, reg, ctxt->d & ByteOp);
fetch_register_operand(op);
op->orig_val = op->val;
}
static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
{
if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
ctxt->modrm_seg = VCPU_SREG_SS;
}
static int decode_modrm(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
u8 sib;
int index_reg, base_reg, scale;
int rc = X86EMUL_CONTINUE;
ulong modrm_ea = 0;
ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
ctxt->modrm_seg = VCPU_SREG_DS;
if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = decode_register(ctxt, ctxt->modrm_rm,
ctxt->d & ByteOp);
if (ctxt->d & Sse) {
op->type = OP_XMM;
op->bytes = 16;
op->addr.xmm = ctxt->modrm_rm;
read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
return rc;
}
if (ctxt->d & Mmx) {
op->type = OP_MM;
op->bytes = 8;
op->addr.mm = ctxt->modrm_rm & 7;
return rc;
}
fetch_register_operand(op);
return rc;
}
op->type = OP_MEM;
if (ctxt->ad_bytes == 2) {
unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
/* 16-bit ModR/M decode. */
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 6)
modrm_ea += insn_fetch(u16, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(u16, ctxt);
break;
}
switch (ctxt->modrm_rm) {
case 0:
modrm_ea += bx + si;
break;
case 1:
modrm_ea += bx + di;
break;
case 2:
modrm_ea += bp + si;
break;
case 3:
modrm_ea += bp + di;
break;
case 4:
modrm_ea += si;
break;
case 5:
modrm_ea += di;
break;
case 6:
if (ctxt->modrm_mod != 0)
modrm_ea += bp;
break;
case 7:
modrm_ea += bx;
break;
}
if (ctxt->modrm_rm == 2 || ctxt->modrm_rm == 3 ||
(ctxt->modrm_rm == 6 && ctxt->modrm_mod != 0))
ctxt->modrm_seg = VCPU_SREG_SS;
modrm_ea = (u16)modrm_ea;
} else {
/* 32/64-bit ModR/M decode. */
if ((ctxt->modrm_rm & 7) == 4) {
sib = insn_fetch(u8, ctxt);
index_reg |= (sib >> 3) & 7;
base_reg |= sib & 7;
scale = sib >> 6;
if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
modrm_ea += insn_fetch(s32, ctxt);
else {
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
if (index_reg != 4)
modrm_ea += reg_read(ctxt, index_reg) << scale;
} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->rip_relative = 1;
} else {
base_reg = ctxt->modrm_rm;
modrm_ea += reg_read(ctxt, base_reg);
adjust_modrm_seg(ctxt, base_reg);
}
switch (ctxt->modrm_mod) {
case 0:
if (ctxt->modrm_rm == 5)
modrm_ea += insn_fetch(s32, ctxt);
break;
case 1:
modrm_ea += insn_fetch(s8, ctxt);
break;
case 2:
modrm_ea += insn_fetch(s32, ctxt);
break;
}
}
op->addr.mem.ea = modrm_ea;
if (ctxt->ad_bytes != 8)
ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
done:
return rc;
}
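/*
 * Worked example, illustrative only: with 16-bit addressing, modrm =
 * 0x46 (mod = 01, reg = 000, rm = 110) decodes to [BP + disp8]; since
 * rm == 6 with a non-zero mod uses BP, the default segment becomes SS.
 */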
static int decode_abs(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_MEM;
switch (ctxt->ad_bytes) {
case 2:
op->addr.mem.ea = insn_fetch(u16, ctxt);
break;
case 4:
op->addr.mem.ea = insn_fetch(u32, ctxt);
break;
case 8:
op->addr.mem.ea = insn_fetch(u64, ctxt);
break;
}
done:
return rc;
}
static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
{
long sv = 0, mask;
if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
mask = ~((long)ctxt->dst.bytes * 8 - 1);
if (ctxt->src.bytes == 2)
sv = (s16)ctxt->src.val & (s16)mask;
else if (ctxt->src.bytes == 4)
sv = (s32)ctxt->src.val & (s32)mask;
else
sv = (s64)ctxt->src.val & (s64)mask;
ctxt->dst.addr.mem.ea += (sv >> 3);
}
/* only subword offset */
ctxt->src.val &= (ctxt->dst.bytes << 3) - 1;
}
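/*
 * Worked example, illustrative only: a register bit offset of 100 on a
 * 64-bit memory destination gives mask = ~63; the effective address
 * advances by (100 & ~63) >> 3 == 8 bytes and the remaining in-word
 * offset becomes 100 & 63 == 36.
 */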
static int read_emulated(struct x86_emulate_ctxt *ctxt,
unsigned long addr, void *dest, unsigned size)
{
int rc;
struct read_cache *mc = &ctxt->mem_read;
if (mc->pos < mc->end)
goto read_cached;
WARN_ON((mc->end + size) >= sizeof(mc->data));
rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
&ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
mc->end += size;
read_cached:
memcpy(dest, mc->data + mc->pos, size);
mc->pos += size;
return X86EMUL_CONTINUE;
}
static int segmented_read(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, false, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return read_emulated(ctxt, linear, data, size);
}
static int segmented_write(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->write_emulated(ctxt, linear, data, size,
&ctxt->exception);
}
static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr,
const void *orig_data, const void *data,
unsigned size)
{
int rc;
ulong linear;
rc = linearize(ctxt, addr, size, true, &linear);
if (rc != X86EMUL_CONTINUE)
return rc;
return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
size, &ctxt->exception);
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
unsigned int size, unsigned short port,
void *dest)
{
struct read_cache *rc = &ctxt->io_read;
if (rc->pos == rc->end) { /* refill pio read ahead */
unsigned int in_page, n;
unsigned int count = ctxt->rep_prefix ?
address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
in_page = (ctxt->eflags & EFLG_DF) ?
offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
if (n == 0)
n = 1;
rc->pos = rc->end = 0;
if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
return 0;
rc->end = n * size;
}
if (ctxt->rep_prefix && (ctxt->d & String) &&
!(ctxt->eflags & EFLG_DF)) {
ctxt->dst.data = rc->data + rc->pos;
ctxt->dst.type = OP_MEM_STR;
ctxt->dst.count = (rc->end - rc->pos) / size;
rc->pos = rc->end;
} else {
memcpy(dest, rc->data + rc->pos, size);
rc->pos += size;
}
return 1;
}
static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
u16 index, struct desc_struct *desc)
{
struct desc_ptr dt;
ulong addr;
ctxt->ops->get_idt(ctxt, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, index << 3 | 0x2);
addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_ptr *dt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 base3 = 0;
if (selector & 1 << 2) {
struct desc_struct desc;
u16 sel;
memset(dt, 0, sizeof *dt);
if (!ops->get_segment(ctxt, &sel, &desc, &base3,
VCPU_SREG_LDTR))
return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
} else
ops->get_gdt(ctxt, dt);
}
/* allowed just for 8-byte segment descriptors */
static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc,
ulong *desc_addr_p)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
*desc_addr_p = addr = dt.address + index * 8;
return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
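/*
 * Worked example, illustrative only: selector 0x000c has index 1,
 * TI = 1 (LDT) and RPL = 0, so the descriptor is read from
 * ldt.base + 1 * 8 and the limit check requires dt.size >= 15.
 */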
/* allowed just for 8-byte segment descriptors */
static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, struct desc_struct *desc)
{
struct desc_ptr dt;
u16 index = selector >> 3;
ulong addr;
get_descriptor_table_ptr(ctxt, selector, &dt);
if (dt.size < index * 8 + 7)
return emulate_gp(ctxt, selector & 0xfffc);
addr = dt.address + index * 8;
return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
&ctxt->exception);
}
/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
bool in_task_switch,
struct desc_struct *desc)
{
struct desc_struct seg_desc, old_desc;
u8 dpl, rpl;
unsigned err_vec = GP_VECTOR;
u32 err_code = 0;
bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
ulong desc_addr;
int ret;
u16 dummy;
u32 base3 = 0;
memset(&seg_desc, 0, sizeof seg_desc);
if (ctxt->mode == X86EMUL_MODE_REAL) {
/* set real mode segment descriptor (keep limit etc. for
* unreal mode) */
ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
set_desc_base(&seg_desc, selector << 4);
goto load;
} else if (seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86) {
/* VM86 needs a clean new segment descriptor */
set_desc_base(&seg_desc, selector << 4);
set_desc_limit(&seg_desc, 0xffff);
seg_desc.type = 3;
seg_desc.p = 1;
seg_desc.s = 1;
seg_desc.dpl = 3;
goto load;
}
rpl = selector & 3;
/* NULL selector is not valid for TR, CS and SS (except for long mode) */
if ((seg == VCPU_SREG_CS
|| (seg == VCPU_SREG_SS
&& (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
|| seg == VCPU_SREG_TR)
&& null_selector)
goto exception;
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
if (null_selector) /* for NULL selector skip all following checks */
goto load;
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
err_code = selector & 0xfffc;
err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
/* can't load system descriptor into segment selector */
if (seg <= VCPU_SREG_GS && !seg_desc.s)
goto exception;
if (!seg_desc.p) {
err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
goto exception;
}
dpl = seg_desc.dpl;
switch (seg) {
case VCPU_SREG_SS:
/*
* segment is not a writable data segment, or the segment
* selector's RPL != CPL, or the descriptor's DPL != CPL
*/
if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
goto exception;
break;
case VCPU_SREG_CS:
if (!(seg_desc.type & 8))
goto exception;
if (seg_desc.type & 4) {
/* conforming */
if (dpl > cpl)
goto exception;
} else {
/* nonconforming */
if (rpl > cpl || dpl != cpl)
goto exception;
}
/* in long-mode d/b must be clear if l is set */
if (seg_desc.d && seg_desc.l) {
u64 efer = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
goto exception;
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
break;
case VCPU_SREG_TR:
if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
goto exception;
old_desc = seg_desc;
seg_desc.type |= 2; /* busy */
ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
sizeof(seg_desc), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
break;
case VCPU_SREG_LDTR:
if (seg_desc.s || seg_desc.type != 2)
goto exception;
break;
default: /* DS, ES, FS, or GS */
/*
* segment is not a data or readable code segment or
* ((segment is a data or nonconforming code segment)
* and (both RPL and CPL > DPL))
*/
if ((seg_desc.type & 0xa) == 0x8 ||
(((seg_desc.type & 0xc) != 0xc) &&
(rpl > dpl && cpl > dpl)))
goto exception;
break;
}
if (seg_desc.s) {
/* mark segment as accessed */
seg_desc.type |= 1;
ret = write_segment_descriptor(ctxt, selector, &seg_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
} else if (ctxt->mode == X86EMUL_MODE_PROT64) {
ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
sizeof(base3), &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
return ret;
}
load:
ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
if (desc)
*desc = seg_desc;
return X86EMUL_CONTINUE;
exception:
return emulate_exception(ctxt, err_vec, err_code, true);
}
static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
}
static void write_register_operand(struct operand *op)
{
/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
switch (op->bytes) {
case 1:
*(u8 *)op->addr.reg = (u8)op->val;
break;
case 2:
*(u16 *)op->addr.reg = (u16)op->val;
break;
case 4:
*op->addr.reg = (u32)op->val;
break; /* 64b: zero-extend */
case 8:
*op->addr.reg = op->val;
break;
}
}
static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
{
switch (op->type) {
case OP_REG:
write_register_operand(op);
break;
case OP_MEM:
if (ctxt->lock_prefix)
return segmented_cmpxchg(ctxt,
op->addr.mem,
&op->orig_val,
&op->val,
op->bytes);
else
return segmented_write(ctxt,
op->addr.mem,
&op->val,
op->bytes);
break;
case OP_MEM_STR:
return segmented_write(ctxt,
op->addr.mem,
op->data,
op->bytes * op->count);
break;
case OP_XMM:
write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
break;
case OP_MM:
write_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
break;
case OP_NONE:
/* no writeback */
break;
default:
break;
}
return X86EMUL_CONTINUE;
}
static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
{
struct segmented_address addr;
rsp_increment(ctxt, -bytes);
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
return segmented_write(ctxt, addr, data, bytes);
}
static int em_push(struct x86_emulate_ctxt *ctxt)
{
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
}
static int emulate_pop(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
struct segmented_address addr;
addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
addr.seg = VCPU_SREG_SS;
rc = segmented_read(ctxt, addr, dest, len);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, len);
return rc;
}
static int em_pop(struct x86_emulate_ctxt *ctxt)
{
return emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int emulate_popf(struct x86_emulate_ctxt *ctxt,
void *dest, int len)
{
int rc;
unsigned long val, change_mask;
int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
int cpl = ctxt->ops->cpl(ctxt);
rc = emulate_pop(ctxt, &val, len);
if (rc != X86EMUL_CONTINUE)
return rc;
change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
switch(ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
if (cpl == 0)
change_mask |= EFLG_IOPL;
if (cpl <= iopl)
change_mask |= EFLG_IF;
break;
case X86EMUL_MODE_VM86:
if (iopl < 3)
return emulate_gp(ctxt, 0);
change_mask |= EFLG_IF;
break;
default: /* real mode */
change_mask |= (EFLG_IOPL | EFLG_IF);
break;
}
*(unsigned long *)dest =
(ctxt->eflags & ~change_mask) | (val & change_mask);
return rc;
}
static int em_popf(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = &ctxt->eflags;
ctxt->dst.bytes = ctxt->op_bytes;
return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
}
static int em_enter(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned frame_size = ctxt->src.val;
unsigned nesting_level = ctxt->src2.val & 31;
ulong rbp;
if (nesting_level)
return X86EMUL_UNHANDLEABLE;
rbp = reg_read(ctxt, VCPU_REGS_RBP);
rc = push(ctxt, &rbp, stack_size(ctxt));
if (rc != X86EMUL_CONTINUE)
return rc;
assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
stack_mask(ctxt));
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
stack_mask(ctxt));
return X86EMUL_CONTINUE;
}
static int em_leave(struct x86_emulate_ctxt *ctxt)
{
assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
stack_mask(ctxt));
return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
}
static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
ctxt->src.val = get_segment_selector(ctxt, seg);
return em_push(ctxt);
}
static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned long selector;
int rc;
rc = emulate_pop(ctxt, &selector, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
rc = load_segment_descriptor(ctxt, (u16)selector, seg);
return rc;
}
static int em_pusha(struct x86_emulate_ctxt *ctxt)
{
unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RAX;
while (reg <= VCPU_REGS_RDI) {
ctxt->src.val = (reg == VCPU_REGS_RSP) ? old_esp : reg_read(ctxt, reg);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
++reg;
}
return rc;
}
static int em_pushf(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.val = (unsigned long)ctxt->eflags;
return em_push(ctxt);
}
static int em_popa(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
int reg = VCPU_REGS_RDI;
while (reg >= VCPU_REGS_RAX) {
if (reg == VCPU_REGS_RSP) {
rsp_increment(ctxt, ctxt->op_bytes);
--reg;
}
rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
break;
--reg;
}
return rc;
}
static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc;
struct desc_ptr dt;
gva_t cs_addr;
gva_t eip_addr;
u16 cs, eip;
/* TODO: Add limit checks */
ctxt->src.val = ctxt->eflags;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
ctxt->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->src.val = ctxt->_eip;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
ops->get_idt(ctxt, &dt);
eip_addr = dt.address + (irq << 2);
cs_addr = dt.address + (irq << 2) + 2;
rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = eip;
return rc;
}
int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
{
int rc;
invalidate_registers(ctxt);
rc = __emulate_int_real(ctxt, irq);
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return rc;
}
static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return __emulate_int_real(ctxt, irq);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* Protected-mode interrupts are not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
unsigned long temp_eip = 0;
unsigned long temp_eflags = 0;
unsigned long cs = 0;
unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF |
EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF |
EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */
unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP;
/* TODO: Add stack limit check */
rc = emulate_pop(ctxt, &temp_eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
if (temp_eip & ~0xffff)
return emulate_gp(ctxt, 0);
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &temp_eflags, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->_eip = temp_eip;
if (ctxt->op_bytes == 4)
ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask));
else if (ctxt->op_bytes == 2) {
ctxt->eflags &= ~0xffff;
ctxt->eflags |= temp_eflags;
}
ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */
ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
return rc;
}
static int em_iret(struct x86_emulate_ctxt *ctxt)
{
switch(ctxt->mode) {
case X86EMUL_MODE_REAL:
return emulate_iret_real(ctxt);
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT64:
default:
/* iret from protected mode is not implemented yet */
return X86EMUL_UNHANDLEABLE;
}
}
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel, old_sel;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
u8 cpl = ctxt->ops->cpl(ctxt);
/* Assignment of RIP may only fail in 64-bit mode */
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
return rc;
}
static int em_grp45(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
switch (ctxt->modrm_reg) {
case 2: /* call near abs */ {
long int old_eip;
old_eip = ctxt->_eip;
rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
break;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
break;
}
case 4: /* jmp abs */
rc = assign_eip_near(ctxt, ctxt->src.val);
break;
case 5: /* jmp far */
rc = em_jmp_far(ctxt);
break;
case 6: /* push */
rc = em_push(ctxt);
break;
}
return rc;
}
static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
{
u64 old = ctxt->dst.orig_val64;
if (ctxt->dst.bytes == 16)
return X86EMUL_UNHANDLEABLE;
if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
ctxt->eflags &= ~EFLG_ZF;
} else {
ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
(u32) reg_read(ctxt, VCPU_REGS_RBX);
ctxt->eflags |= EFLG_ZF;
}
return X86EMUL_CONTINUE;
}
static int em_ret(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
}
static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
VCPU_SREG_CS);
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Outer-privilege level return is not implemented */
if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
return X86EMUL_UNHANDLEABLE;
rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, new_desc.l);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
}
return rc;
}
static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = em_ret_far(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
{
/* Save real source value, then compare EAX against destination. */
ctxt->dst.orig_val = ctxt->dst.val;
ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
ctxt->src.orig_val = ctxt->src.val;
ctxt->src.val = ctxt->dst.orig_val;
fastop(ctxt, em_cmp);
if (ctxt->eflags & EFLG_ZF) {
/* Success: write back to memory. */
ctxt->dst.val = ctxt->src.orig_val;
} else {
/* Failure: write the value we saw to EAX. */
ctxt->dst.type = OP_REG;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
ctxt->dst.val = ctxt->dst.orig_val;
}
return X86EMUL_CONTINUE;
}
static int em_lseg(struct x86_emulate_ctxt *ctxt)
{
int seg = ctxt->src2.val;
unsigned short sel;
int rc;
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = load_segment_descriptor(ctxt, sel, seg);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->dst.val = ctxt->src.val;
return rc;
}
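/*
 * Build the flat 4GB code and stack segments implicitly loaded by the
 * SYSCALL/SYSENTER family of instructions; cs->l and the DPLs are
 * adjusted further by the callers as needed.
 */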
static void
setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct desc_struct *cs, struct desc_struct *ss)
{
cs->l = 0; /* will be adjusted later */
set_desc_base(cs, 0); /* flat segment */
cs->g = 1; /* 4kb granularity */
set_desc_limit(cs, 0xfffff); /* 4GB limit */
cs->type = 0x0b; /* Read, Execute, Accessed */
cs->s = 1;
cs->dpl = 0; /* will be adjusted later */
cs->p = 1;
cs->d = 1;
cs->avl = 0;
set_desc_base(ss, 0); /* flat segment */
set_desc_limit(ss, 0xfffff); /* 4GB limit */
ss->g = 1; /* 4kb granularity */
ss->s = 1;
ss->type = 0x03; /* Read/Write, Accessed */
ss->d = 1; /* 32bit stack segment */
ss->dpl = 0;
ss->p = 1;
ss->l = 0;
ss->avl = 0;
}
static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = ecx = 0;
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
}
static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
u32 eax, ebx, ecx, edx;
/*
 * SYSCALL should always be enabled in long mode, so the check only
 * needs to become vendor specific (via CPUID) when another mode is
 * active...
 */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return true;
eax = 0x00000000;
ecx = 0x00000000;
ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
/*
 * Intel ("GenuineIntel")
 * Remark: Intel CPUs only support "syscall" in 64-bit long mode, so
 * a 64-bit guest running a 32-bit compat application will take a #UD.
 * While this behaviour could be emulated to match the AMD response,
 * AMD CPUs cannot be made to behave like Intel, so Intel's stricter
 * rules apply here.
 */
if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
return false;
/* AMD ("AuthenticAMD") */
if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
return true;
/* AMD ("AMDisbetter!") */
if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
return true;
/* default: (not Intel, not AMD), apply Intel's stricter rules... */
return false;
}
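/*
 * SYSCALL: CS and SS are derived from STAR[47:32], and the return RIP
 * is placed in RCX. In long mode, RFLAGS is additionally saved in R11,
 * the new RIP comes from LSTAR (or CSTAR for a compat-mode caller) and
 * RFLAGS is masked by SYSCALL_MASK; legacy mode takes the target EIP
 * from STAR and clears VM and IF.
 */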
static int em_syscall(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
/* syscall is not available in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_ud(ctxt);
if (!em_syscall_is_enabled(ctxt))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_EFER, &efer);
setup_syscalls_segments(ctxt, &cs, &ss);
if (!(efer & EFER_SCE))
return emulate_ud(ctxt);
ops->get_msr(ctxt, MSR_STAR, &msr_data);
msr_data >>= 32;
cs_sel = (u16)(msr_data & 0xfffc);
ss_sel = (u16)(msr_data + 8);
if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
if (efer & EFER_LMA) {
#ifdef CONFIG_X86_64
*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
ops->get_msr(ctxt,
ctxt->mode == X86EMUL_MODE_PROT64 ?
MSR_LSTAR : MSR_CSTAR, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
ctxt->eflags &= ~msr_data;
#endif
} else {
/* legacy mode */
ops->get_msr(ctxt, MSR_STAR, &msr_data);
ctxt->_eip = (u32)msr_data;
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
}
return X86EMUL_CONTINUE;
}
static int em_sysenter(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data;
u16 cs_sel, ss_sel;
u64 efer = 0;
ops->get_msr(ctxt, MSR_EFER, &efer);
/* inject #GP if in real mode */
if (ctxt->mode == X86EMUL_MODE_REAL)
return emulate_gp(ctxt, 0);
/*
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
/*
 * XXX sysenter/sysexit have not been tested in 64-bit mode;
 * therefore, we inject a #UD.
 */
if (ctxt->mode == X86EMUL_MODE_PROT64)
return emulate_ud(ctxt);
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (ctxt->mode) {
case X86EMUL_MODE_PROT32:
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
break;
case X86EMUL_MODE_PROT64:
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
break;
default:
break;
}
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
cs_sel = (u16)msr_data;
cs_sel &= ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
ss_sel &= ~SELECTOR_RPL_MASK;
if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
cs.d = 0;
cs.l = 1;
}
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
ctxt->_eip = msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
return X86EMUL_CONTINUE;
}
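/*
 * SYSEXIT: the target selectors are derived from SYSENTER_CS (CS =
 * base + 16 and SS = base + 24 for a 32-bit return; CS = base + 32
 * with SS following for a 64-bit return), with the RPL forced to 3.
 * The new RIP comes from RDX and the new RSP from RCX, both of which
 * must be canonical for a 64-bit return.
 */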
static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss;
u64 msr_data, rcx, rdx;
int usermode;
u16 cs_sel = 0, ss_sel = 0;
/* inject #GP if in real mode or Virtual 8086 mode */
if (ctxt->mode == X86EMUL_MODE_REAL ||
ctxt->mode == X86EMUL_MODE_VM86)
return emulate_gp(ctxt, 0);
setup_syscalls_segments(ctxt, &cs, &ss);
if ((ctxt->rex_prefix & 0x8) != 0x0)
usermode = X86EMUL_MODE_PROT64;
else
usermode = X86EMUL_MODE_PROT32;
rcx = reg_read(ctxt, VCPU_REGS_RCX);
rdx = reg_read(ctxt, VCPU_REGS_RDX);
cs.dpl = 3;
ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
switch (usermode) {
case X86EMUL_MODE_PROT32:
cs_sel = (u16)(msr_data + 16);
if ((msr_data & 0xfffc) == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = (u16)(msr_data + 24);
break;
case X86EMUL_MODE_PROT64:
cs_sel = (u16)(msr_data + 32);
if (msr_data == 0x0)
return emulate_gp(ctxt, 0);
ss_sel = cs_sel + 8;
cs.d = 0;
cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break;
}
cs_sel |= SELECTOR_RPL_MASK;
ss_sel |= SELECTOR_RPL_MASK;
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = rdx;
*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
return X86EMUL_CONTINUE;
}
static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
{
int iopl;
if (ctxt->mode == X86EMUL_MODE_REAL)
return false;
if (ctxt->mode == X86EMUL_MODE_VM86)
return true;
iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
return ctxt->ops->cpl(ctxt) > iopl;
}
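/*
 * Walk the I/O permission bitmap in the TSS: the 16-bit bitmap offset
 * lives at byte 102 of the TSS, and the access is allowed only if
 * every bit covering ports [port, port + len) is clear. Reads beyond
 * the TSS limit deny the access.
 */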
static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct tr_seg;
u32 base3;
int r;
u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1;
unsigned long base;
ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p)
return false;
if (desc_limit_scaled(&tr_seg) < 103)
return false;
base = get_desc_base(&tr_seg);
#ifdef CONFIG_X86_64
base |= ((u64)base3) << 32;
#endif
r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
return false;
r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
if (r != X86EMUL_CONTINUE)
return false;
if ((perm >> bit_idx) & mask)
return false;
return true;
}
static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
u16 port, u16 len)
{
if (ctxt->perm_ok)
return true;
if (emulator_bad_iopl(ctxt))
if (!emulator_io_port_access_allowed(ctxt, port, len))
return false;
ctxt->perm_ok = true;
return true;
}
static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
tss->ip = ctxt->_eip;
tss->flag = ctxt->eflags;
tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
tss->si = reg_read(ctxt, VCPU_REGS_RSI);
tss->di = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
}
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
struct tss_segment_16 *tss)
{
int ret;
u8 cpl;
ctxt->_eip = tss->ip;
ctxt->eflags = tss->flag | 2;
*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
/*
* SDM says that segment selectors are loaded before segment
* descriptors
*/
set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
cpl = tss->cs & 3;
/*
 * Now load the segment descriptors. If a fault happens at this
 * stage, it is handled in the context of the new task.
 */
ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
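/*
 * 16-bit task switch: save the current state into the old TSS, read
 * the new TSS, link it back to the old task when requested
 * (old_tss_sel != 0xffff), and finally load the new state.
 */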
static int task_switch_16(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_16 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
save_state_to_tss16(ctxt, &tss_seg);
ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
}
return load_state_from_tss16(ctxt, &tss_seg);
}
static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
/* CR3 and ldt selector are not saved intentionally */
tss->eip = ctxt->_eip;
tss->eflags = ctxt->eflags;
tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
}
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
struct tss_segment_32 *tss)
{
int ret;
u8 cpl;
if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
return emulate_gp(ctxt, 0);
ctxt->_eip = tss->eip;
ctxt->eflags = tss->eflags | 2;
/* General purpose registers */
*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
/*
* SDM says that segment selectors are loaded before segment
* descriptors. This is important because CPL checks will
* use CS.RPL.
*/
set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/*
* If we're switching between Protected Mode and VM86, we need to make
* sure to update the mode before loading the segment descriptors so
* that the selectors are interpreted correctly.
*/
if (ctxt->eflags & X86_EFLAGS_VM) {
ctxt->mode = X86EMUL_MODE_VM86;
cpl = 3;
} else {
ctxt->mode = X86EMUL_MODE_PROT32;
cpl = tss->cs & 3;
}
/*
 * Now load the segment descriptors. If a fault happens at this
 * stage, it is handled in the context of the new task.
 */
ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
cpl, true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
true, NULL);
if (ret != X86EMUL_CONTINUE)
return ret;
return X86EMUL_CONTINUE;
}
static int task_switch_32(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, u16 old_tss_sel,
ulong old_tss_base, struct desc_struct *new_desc)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct tss_segment_32 tss_seg;
int ret;
u32 new_tss_base = get_desc_base(new_desc);
u32 eip_offset = offsetof(struct tss_segment_32, eip);
u32 ldt_sel_offset = offsetof(struct tss_segment_32, ldt_selector);
ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
save_state_to_tss32(ctxt, &tss_seg);
/* Only GP registers and segment selectors are saved */
ret = ops->write_std(ctxt, old_tss_base + eip_offset, &tss_seg.eip,
ldt_sel_offset - eip_offset, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
if (old_tss_sel != 0xffff) {
tss_seg.prev_task_link = old_tss_sel;
ret = ops->write_std(ctxt, new_tss_base,
&tss_seg.prev_task_link,
sizeof tss_seg.prev_task_link,
&ctxt->exception);
if (ret != X86EMUL_CONTINUE)
/* FIXME: need to provide precise fault address */
return ret;
}
return load_state_from_tss32(ctxt, &tss_seg);
}
static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
const struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct curr_tss_desc, next_tss_desc;
int ret;
u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit;
ulong desc_addr;
/* FIXME: old_tss_base == ~0 ? */
ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
return ret;
/* FIXME: check that next_tss_desc is tss */
/*
* Check privileges. The three cases are task switch caused by...
*
* 1. jmp/call/int to task gate: Check against DPL of the task gate
* 2. Exception/IRQ/iret: No check is performed
* 3. jmp/call to TSS: Check against DPL of the TSS
*/
if (reason == TASK_SWITCH_GATE) {
if (idt_index != -1) {
/* Software interrupts */
struct desc_struct task_gate_desc;
int dpl;
ret = read_interrupt_descriptor(ctxt, idt_index,
&task_gate_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
dpl = task_gate_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, (idt_index << 3) | 0x2);
}
} else if (reason != TASK_SWITCH_IRET) {
int dpl = next_tss_desc.dpl;
if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
return emulate_gp(ctxt, tss_selector);
}
desc_limit = desc_limit_scaled(&next_tss_desc);
if (!next_tss_desc.p ||
((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
desc_limit < 0x2b)) {
return emulate_ts(ctxt, tss_selector & 0xfffc);
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
}
if (reason == TASK_SWITCH_IRET)
ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
/*
 * Set the back link to the previous task only if the NT bit is set in
 * EFLAGS; note that old_tss_sel is not used after this point.
 */
if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
old_tss_sel = 0xffff;
if (next_tss_desc.type & 8)
ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
else
ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
old_tss_base, &next_tss_desc);
if (ret != X86EMUL_CONTINUE)
return ret;
if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
if (reason != TASK_SWITCH_IRET) {
next_tss_desc.type |= (1 << 1); /* set busy flag */
write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
}
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
if (has_error_code) {
ctxt->op_bytes = ctxt->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
ctxt->lock_prefix = 0;
ctxt->src.val = (unsigned long) error_code;
ret = em_push(ctxt);
}
return ret;
}
int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
u16 tss_selector, int idt_index, int reason,
bool has_error_code, u32 error_code)
{
int rc;
invalidate_registers(ctxt);
ctxt->_eip = ctxt->eip;
ctxt->dst.type = OP_NONE;
rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
has_error_code, error_code);
if (rc == X86EMUL_CONTINUE) {
ctxt->eip = ctxt->_eip;
writeback_registers(ctxt);
}
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
}
static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
struct operand *op)
{
int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
}
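/*
 * DAS (decimal adjust AL after subtraction): if the low nibble of AL
 * is greater than 9 or AF is set, subtract 6 from AL and set AF (the
 * al >= 250 test catches the borrow from that subtraction); if the
 * original AL was greater than 0x99 or CF was set, subtract 0x60 and
 * set CF. PF, ZF and SF are then derived by ORing the result with 0.
 */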
static int em_das(struct x86_emulate_ctxt *ctxt)
{
u8 al, old_al;
bool af, cf, old_cf;
cf = ctxt->eflags & X86_EFLAGS_CF;
al = ctxt->dst.val;
old_al = al;
old_cf = cf;
cf = false;
af = ctxt->eflags & X86_EFLAGS_AF;
if ((al & 0x0f) > 9 || af) {
al -= 6;
cf = old_cf | (al >= 250);
af = true;
} else {
af = false;
}
if (old_al > 0x99 || old_cf) {
al -= 0x60;
cf = true;
}
ctxt->dst.val = al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF);
if (cf)
ctxt->eflags |= X86_EFLAGS_CF;
if (af)
ctxt->eflags |= X86_EFLAGS_AF;
return X86EMUL_CONTINUE;
}
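/*
 * AAM: divide AL by the immediate operand (10 in the canonical
 * encoding), placing the quotient in AH and the remainder in AL; a
 * zero divisor raises #DE. AAD below is the inverse: AL = AL + AH *
 * imm, with AH cleared.
 */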
static int em_aam(struct x86_emulate_ctxt *ctxt)
{
u8 al, ah;
if (ctxt->src.val == 0)
return emulate_de(ctxt);
al = ctxt->dst.val & 0xff;
ah = al / ctxt->src.val;
al %= ctxt->src.val;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8);
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_aad(struct x86_emulate_ctxt *ctxt)
{
u8 al = ctxt->dst.val & 0xff;
u8 ah = (ctxt->dst.val >> 8) & 0xff;
al = (al + (ah * ctxt->src.val)) & 0xff;
ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al;
/* Set PF, ZF, SF */
ctxt->src.type = OP_IMM;
ctxt->src.val = 0;
ctxt->src.bytes = 1;
fastop(ctxt, em_or);
return X86EMUL_CONTINUE;
}
static int em_call(struct x86_emulate_ctxt *ctxt)
{
int rc;
long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip;
rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt);
}
static int em_call_far(struct x86_emulate_ctxt *ctxt)
{
u16 sel, old_cs;
ulong old_eip;
int rc;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
int cpl = ctxt->ops->cpl(ctxt);
old_eip = ctxt->_eip;
ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
&new_desc);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_cs;
rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE)
goto fail;
ctxt->src.val = old_eip;
rc = em_push(ctxt);
/*
 * If we failed, we may have tainted memory, but at the very least we
 * should restore cs.
 */
if (rc != X86EMUL_CONTINUE)
goto fail;
return rc;
fail:
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE)
return rc;
rsp_increment(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE;
}
static int em_xchg(struct x86_emulate_ctxt *ctxt)
{
/* Write back the register source. */
ctxt->src.val = ctxt->dst.val;
write_register_operand(&ctxt->src);
/* Write back the memory destination with implicit LOCK prefix. */
ctxt->dst.val = ctxt->src.orig_val;
ctxt->lock_prefix = 1;
return X86EMUL_CONTINUE;
}
static int em_imul_3op(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.val = ctxt->src2.val;
return fastop(ctxt, em_imul);
}
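/*
 * CWD/CDQ/CQO: sign-extend the accumulator into the DX-sized register.
 * ~((val >> (bits - 1)) - 1) evaluates to all ones when the sign bit
 * is set and to zero otherwise.
 */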
static int em_cwd(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.type = OP_REG;
ctxt->dst.bytes = ctxt->src.bytes;
ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
return X86EMUL_CONTINUE;
}
static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 tsc = 0;
ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
*reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
return X86EMUL_CONTINUE;
}
static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 pmc;
if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
*reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov(struct x86_emulate_ctxt *ctxt)
{
memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
return X86EMUL_CONTINUE;
}
#define FFL(x) bit(X86_FEATURE_##x)
static int em_movbe(struct x86_emulate_ctxt *ctxt)
{
u32 ebx, ecx, edx, eax = 1;
u16 tmp;
/*
* Check that MOVBE is set in the guest-visible CPUID leaf.
*/
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
if (!(ecx & FFL(MOVBE)))
return emulate_ud(ctxt);
switch (ctxt->op_bytes) {
case 2:
/*
* From MOVBE definition: "...When the operand size is 16 bits,
* the upper word of the destination register remains unchanged
* ..."
*
* Casting either ->valptr or ->val to u16 breaks strict aliasing
* rules, so we have to do the operation almost by hand.
*/
tmp = (u16)ctxt->src.val;
ctxt->dst.val &= ~0xffffUL;
ctxt->dst.val |= (unsigned long)swab16(tmp);
break;
case 4:
ctxt->dst.val = swab32((u32)ctxt->src.val);
break;
case 8:
ctxt->dst.val = swab64(ctxt->src.val);
break;
default:
BUG();
}
return X86EMUL_CONTINUE;
}
static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_dr_write(struct x86_emulate_ctxt *ctxt)
{
unsigned long val;
if (ctxt->mode == X86EMUL_MODE_PROT64)
val = ctxt->src.val & ~0ULL;
else
val = ctxt->src.val & ~0U;
/* #UD condition is already handled. */
if (ctxt->ops->set_dr(ctxt, ctxt->modrm_reg, val) < 0)
return emulate_gp(ctxt, 0);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
| ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
{
u64 msr_data;
if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
return emulate_gp(ctxt, 0);
*reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
*reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
return X86EMUL_CONTINUE;
}
static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
return X86EMUL_CONTINUE;
}
static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
if (ctxt->modrm_reg == VCPU_SREG_CS || ctxt->modrm_reg > VCPU_SREG_GS)
return emulate_ud(ctxt);
if (ctxt->modrm_reg == VCPU_SREG_SS)
ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
}
static int em_lldt(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
}
static int em_ltr(struct x86_emulate_ctxt *ctxt)
{
u16 sel = ctxt->src.val;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
}
static int em_invlpg(struct x86_emulate_ctxt *ctxt)
{
int rc;
ulong linear;
rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear);
if (rc == X86EMUL_CONTINUE)
ctxt->ops->invlpg(ctxt, linear);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_clts(struct x86_emulate_ctxt *ctxt)
{
ulong cr0;
cr0 = ctxt->ops->get_cr(ctxt, 0);
cr0 &= ~X86_CR0_TS;
ctxt->ops->set_cr(ctxt, 0, cr0);
return X86EMUL_CONTINUE;
}
static int em_vmcall(struct x86_emulate_ctxt *ctxt)
{
int rc = ctxt->ops->fix_hypercall(ctxt);
if (rc != X86EMUL_CONTINUE)
return rc;
/* Let the processor re-execute the fixed hypercall */
ctxt->_eip = ctxt->eip;
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
void (*get)(struct x86_emulate_ctxt *ctxt,
struct desc_ptr *ptr))
{
struct desc_ptr desc_ptr;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
get(ctxt, &desc_ptr);
if (ctxt->op_bytes == 2) {
ctxt->op_bytes = 4;
desc_ptr.address &= 0x00ffffff;
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return segmented_write(ctxt, ctxt->dst.addr.mem,
&desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
}
static int em_sidt(struct x86_emulate_ctxt *ctxt)
{
return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
}
static int em_lgdt(struct x86_emulate_ctxt *ctxt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_gdt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
{
int rc;
rc = ctxt->ops->fix_hypercall(ctxt);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return rc;
}
static int em_lidt(struct x86_emulate_ctxt *ctxt)
{
struct desc_ptr desc_ptr;
int rc;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
rc = read_descriptor(ctxt, ctxt->src.addr.mem,
&desc_ptr.size, &desc_ptr.address,
ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
ctxt->ops->set_idt(ctxt, &desc_ptr);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_smsw(struct x86_emulate_ctxt *ctxt)
{
if (ctxt->dst.type == OP_MEM)
ctxt->dst.bytes = 2;
ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int em_lmsw(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul)
| (ctxt->src.val & 0x0f));
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
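/*
 * LOOP/LOOPE/LOOPNE (0xe0-0xe2): decrement (r/e)CX and branch while
 * the count is nonzero. Plain LOOP (0xe2) branches on the count alone;
 * XORing the opcode with 5 maps LOOPNE/LOOPE onto the matching
 * test_cc() conditions.
 */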
static int em_loop(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
rc = jmp_rel(ctxt, ctxt->src.val);
return rc;
}
static int em_in(struct x86_emulate_ctxt *ctxt)
{
if (!pio_in_emulated(ctxt, ctxt->dst.bytes, ctxt->src.val,
&ctxt->dst.val))
return X86EMUL_IO_NEEDED;
return X86EMUL_CONTINUE;
}
static int em_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->ops->pio_out_emulated(ctxt, ctxt->src.bytes, ctxt->dst.val,
&ctxt->src.val, 1);
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
return X86EMUL_CONTINUE;
}
static int em_cli(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->eflags &= ~X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_sti(struct x86_emulate_ctxt *ctxt)
{
if (emulator_bad_iopl(ctxt))
return emulate_gp(ctxt, 0);
ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
ctxt->eflags |= X86_EFLAGS_IF;
return X86EMUL_CONTINUE;
}
static int em_cpuid(struct x86_emulate_ctxt *ctxt)
{
u32 eax, ebx, ecx, edx;
eax = reg_read(ctxt, VCPU_REGS_RAX);
ecx = reg_read(ctxt, VCPU_REGS_RCX);
ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
*reg_write(ctxt, VCPU_REGS_RAX) = eax;
*reg_write(ctxt, VCPU_REGS_RBX) = ebx;
*reg_write(ctxt, VCPU_REGS_RCX) = ecx;
*reg_write(ctxt, VCPU_REGS_RDX) = edx;
return X86EMUL_CONTINUE;
}
static int em_sahf(struct x86_emulate_ctxt *ctxt)
{
u32 flags;
flags = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF;
flags &= *reg_rmw(ctxt, VCPU_REGS_RAX) >> 8;
ctxt->eflags &= ~0xffUL;
ctxt->eflags |= flags | X86_EFLAGS_FIXED;
return X86EMUL_CONTINUE;
}
static int em_lahf(struct x86_emulate_ctxt *ctxt)
{
*reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
*reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
return X86EMUL_CONTINUE;
}
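/*
 * BSWAP: reverse the byte order of the destination register. Only the
 * 32- and 64-bit forms are implemented; the architectural result for a
 * 16-bit operand is undefined, and the default case treats it as
 * 32-bit.
 */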
static int em_bswap(struct x86_emulate_ctxt *ctxt)
{
switch (ctxt->op_bytes) {
#ifdef CONFIG_X86_64
case 8:
asm("bswap %0" : "+r"(ctxt->dst.val));
break;
#endif
default:
asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
break;
}
return X86EMUL_CONTINUE;
}
static int em_clflush(struct x86_emulate_ctxt *ctxt)
{
/* emulating clflush regardless of cpuid */
return X86EMUL_CONTINUE;
}
static bool valid_cr(int nr)
{
switch (nr) {
case 0:
case 2 ... 4:
case 8:
return true;
default:
return false;
}
}
static int check_cr_read(struct x86_emulate_ctxt *ctxt)
{
if (!valid_cr(ctxt->modrm_reg))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
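/*
 * Validate a control register write: reject values that set reserved
 * bits (per the table below) and enforce the architectural consistency
 * checks on CR0's paging/cache bits and on PAE while long mode is
 * active.
 */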
static int check_cr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int cr = ctxt->modrm_reg;
u64 efer = 0;
static u64 cr_reserved_bits[] = {
0xffffffff00000000ULL,
0, 0, 0, /* CR3 checked later */
CR4_RESERVED_BITS,
0, 0, 0,
CR8_RESERVED_BITS,
};
if (!valid_cr(cr))
return emulate_ud(ctxt);
if (new_val & cr_reserved_bits[cr])
return emulate_gp(ctxt, 0);
switch (cr) {
case 0: {
u64 cr4;
if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) ||
((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
return emulate_gp(ctxt, 0);
cr4 = ctxt->ops->get_cr(ctxt, 4);
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
!(cr4 & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
case 3: {
u64 rsvd = 0;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (efer & EFER_LMA)
rsvd = CR3_L_MODE_RESERVED_BITS;
if (new_val & rsvd)
return emulate_gp(ctxt, 0);
break;
}
case 4: {
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
return emulate_gp(ctxt, 0);
break;
}
}
return X86EMUL_CONTINUE;
}
static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
{
unsigned long dr7;
ctxt->ops->get_dr(ctxt, 7, &dr7);
/* Check if DR7.GD (general detect) is set */
return dr7 & (1 << 13);
}
static int check_dr_read(struct x86_emulate_ctxt *ctxt)
{
int dr = ctxt->modrm_reg;
u64 cr4;
if (dr > 7)
return emulate_ud(ctxt);
cr4 = ctxt->ops->get_cr(ctxt, 4);
if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
return emulate_ud(ctxt);
if (check_dr7_gd(ctxt))
return emulate_db(ctxt);
return X86EMUL_CONTINUE;
}
static int check_dr_write(struct x86_emulate_ctxt *ctxt)
{
u64 new_val = ctxt->src.val64;
int dr = ctxt->modrm_reg;
if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL))
return emulate_gp(ctxt, 0);
return check_dr_read(ctxt);
}
static int check_svme(struct x86_emulate_ctxt *ctxt)
{
u64 efer;
ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
if (!(efer & EFER_SVME))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
{
u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
/* Valid physical address? */
if (rax & 0xffff000000000000ULL)
return emulate_gp(ctxt, 0);
return check_svme(ctxt);
}
static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
return emulate_ud(ctxt);
return X86EMUL_CONTINUE;
}
static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
{
u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
ctxt->ops->check_pmc(ctxt, rcx))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_in(struct x86_emulate_ctxt *ctxt)
{
ctxt->dst.bytes = min(ctxt->dst.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->src.val, ctxt->dst.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
static int check_perm_out(struct x86_emulate_ctxt *ctxt)
{
ctxt->src.bytes = min(ctxt->src.bytes, 4u);
if (!emulator_io_permited(ctxt, ctxt->dst.val, ctxt->src.bytes))
return emulate_gp(ctxt, 0);
return X86EMUL_CONTINUE;
}
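/*
 * Shorthand used to build the decode tables below: D() declares a
 * decode-only entry, I() an entry with an ->execute callback, F() a
 * fastop entry, G()/GD() group and group-dual entries selected via
 * ModRM, E() an x87 escape entry and GP() a mandatory-prefix entry.
 * The DI/II/DIP/IIP variants add intercept and permission checks, and
 * the *2bv/F6ALU macros expand into byte/word pairs or the six classic
 * ALU encodings.
 */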
#define D(_y) { .flags = (_y) }
#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define N D(NotImpl)
#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
#define II(_f, _e, _i) \
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
#define IIP(_f, _e, _i, _p) \
{ .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
.intercept = x86_intercept_##_i, .check_perm = (_p) }
#define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
#define D2bv(_f) D((_f) | ByteOp), D(_f)
#define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p)
#define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e)
#define F2bv(_f, _e) F((_f) | ByteOp, _e), F(_f, _e)
#define I2bvIP(_f, _e, _i, _p) \
IIP((_f) | ByteOp, _e, _i, _p), IIP(_f, _e, _i, _p)
#define F6ALU(_f, _e) F2bv((_f) | DstMem | SrcReg | ModRM, _e), \
F2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
F2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
static const struct opcode group7_rm0[] = {
N,
I(SrcNone | Priv | EmulateOnUD, em_vmcall),
N, N, N, N, N, N,
};
static const struct opcode group7_rm1[] = {
DI(SrcNone | Priv, monitor),
DI(SrcNone | Priv, mwait),
N, N, N, N, N, N,
};
static const struct opcode group7_rm3[] = {
DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
II(SrcNone | Prot | EmulateOnUD, em_vmmcall, vmmcall),
DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
DIP(SrcNone | Prot | Priv, stgi, check_svme),
DIP(SrcNone | Prot | Priv, clgi, check_svme),
DIP(SrcNone | Prot | Priv, skinit, check_svme),
DIP(SrcNone | Prot | Priv, invlpga, check_svme),
};
static const struct opcode group7_rm7[] = {
N,
DIP(SrcNone, rdtscp, check_rdtsc),
N, N, N, N, N, N,
};
static const struct opcode group1[] = {
F(Lock, em_add),
F(Lock | PageTable, em_or),
F(Lock, em_adc),
F(Lock, em_sbb),
F(Lock | PageTable, em_and),
F(Lock, em_sub),
F(Lock, em_xor),
F(NoWrite, em_cmp),
};
static const struct opcode group1A[] = {
I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
};
static const struct opcode group2[] = {
F(DstMem | ModRM, em_rol),
F(DstMem | ModRM, em_ror),
F(DstMem | ModRM, em_rcl),
F(DstMem | ModRM, em_rcr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_shr),
F(DstMem | ModRM, em_shl),
F(DstMem | ModRM, em_sar),
};
static const struct opcode group3[] = {
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcImm | NoWrite, em_test),
F(DstMem | SrcNone | Lock, em_not),
F(DstMem | SrcNone | Lock, em_neg),
F(DstXacc | Src2Mem, em_mul_ex),
F(DstXacc | Src2Mem, em_imul_ex),
F(DstXacc | Src2Mem, em_div_ex),
F(DstXacc | Src2Mem, em_idiv_ex),
};
static const struct opcode group4[] = {
F(ByteOp | DstMem | SrcNone | Lock, em_inc),
F(ByteOp | DstMem | SrcNone | Lock, em_dec),
N, N, N, N, N, N,
};
static const struct opcode group5[] = {
F(DstMem | SrcNone | Lock, em_inc),
F(DstMem | SrcNone | Lock, em_dec),
I(SrcMem | Stack, em_grp45),
I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
I(SrcMem | Stack, em_grp45),
I(SrcMemFAddr | ImplicitOps, em_grp45),
I(SrcMem | Stack, em_grp45), D(Undefined),
};
static const struct opcode group6[] = {
DI(Prot, sldt),
DI(Prot, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
};
static const struct group_dual group7 = { {
II(Mov | DstMem, em_sgdt, sgdt),
II(Mov | DstMem, em_sidt, sidt),
II(SrcMem | Priv, em_lgdt, lgdt),
II(SrcMem | Priv, em_lidt, lidt),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
}, {
EXT(0, group7_rm0),
EXT(0, group7_rm1),
N, EXT(0, group7_rm3),
II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
EXT(0, group7_rm7),
} };
static const struct opcode group8[] = {
N, N, N, N,
F(DstMem | SrcImmByte | NoWrite, em_bt),
F(DstMem | SrcImmByte | Lock | PageTable, em_bts),
F(DstMem | SrcImmByte | Lock, em_btr),
F(DstMem | SrcImmByte | Lock | PageTable, em_btc),
};
static const struct group_dual group9 = { {
N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
}, {
N, N, N, N, N, N, N, N,
} };
static const struct opcode group11[] = {
I(DstMem | SrcImm | Mov | PageTable, em_mov),
X7(D(Undefined)),
};
static const struct gprefix pfx_0f_ae_7 = {
I(SrcMem | ByteOp, em_clflush), N, N, N,
};
static const struct group_dual group15 = { {
N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
}, {
N, N, N, N, N, N, N, N,
} };
static const struct gprefix pfx_0f_6f_0f_7f = {
I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
};
static const struct gprefix pfx_0f_2b = {
I(0, em_mov), I(0, em_mov), N, N,
};
static const struct gprefix pfx_0f_28_0f_29 = {
I(Aligned, em_mov), I(Aligned, em_mov), N, N,
};
static const struct gprefix pfx_0f_e7 = {
N, I(Sse, em_mov), N, N,
};
static const struct escape escape_d9 = { {
N, N, N, N, N, N, N, I(DstMem, em_fnstcw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_db = { {
N, N, N, N, N, N, N, N,
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, I(ImplicitOps, em_fninit), N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct escape escape_dd = { {
N, N, N, N, N, N, N, I(DstMem, em_fnstsw),
}, {
/* 0xC0 - 0xC7 */
N, N, N, N, N, N, N, N,
/* 0xC8 - 0xCF */
N, N, N, N, N, N, N, N,
/* 0xD0 - 0xD7 */
N, N, N, N, N, N, N, N,
/* 0xD8 - 0xDF */
N, N, N, N, N, N, N, N,
/* 0xE0 - 0xE7 */
N, N, N, N, N, N, N, N,
/* 0xE8 - 0xEF */
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xF7 */
N, N, N, N, N, N, N, N,
/* 0xF8 - 0xFF */
N, N, N, N, N, N, N, N,
} };
static const struct opcode opcode_table[256] = {
/* 0x00 - 0x07 */
F6ALU(Lock, em_add),
I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2ES, em_pop_sreg),
/* 0x08 - 0x0F */
F6ALU(Lock | PageTable, em_or),
I(ImplicitOps | Stack | No64 | Src2CS, em_push_sreg),
N,
/* 0x10 - 0x17 */
F6ALU(Lock, em_adc),
I(ImplicitOps | Stack | No64 | Src2SS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2SS, em_pop_sreg),
/* 0x18 - 0x1F */
F6ALU(Lock, em_sbb),
I(ImplicitOps | Stack | No64 | Src2DS, em_push_sreg),
I(ImplicitOps | Stack | No64 | Src2DS, em_pop_sreg),
/* 0x20 - 0x27 */
F6ALU(Lock | PageTable, em_and), N, N,
/* 0x28 - 0x2F */
F6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das),
/* 0x30 - 0x37 */
F6ALU(Lock, em_xor), N, N,
/* 0x38 - 0x3F */
F6ALU(NoWrite, em_cmp), N, N,
/* 0x40 - 0x4F */
X8(F(DstReg, em_inc)), X8(F(DstReg, em_dec)),
/* 0x50 - 0x57 */
X8(I(SrcReg | Stack, em_push)),
/* 0x58 - 0x5F */
X8(I(DstReg | Stack, em_pop)),
/* 0x60 - 0x67 */
I(ImplicitOps | Stack | No64, em_pusha),
I(ImplicitOps | Stack | No64, em_popa),
N, D(DstReg | SrcMem32 | ModRM | Mov), /* movsxd (x86/64) */
N, N, N, N,
/* 0x68 - 0x6F */
I(SrcImm | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
I(SrcImmByte | Mov | Stack, em_push),
I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
/* 0x70 - 0x7F */
X16(D(SrcImmByte)),
/* 0x80 - 0x87 */
G(ByteOp | DstMem | SrcImm, group1),
G(DstMem | SrcImm, group1),
G(ByteOp | DstMem | SrcImm | No64, group1),
G(DstMem | SrcImmByte, group1),
F2bv(DstMem | SrcReg | ModRM | NoWrite, em_test),
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
/* 0x88 - 0x8F */
I2bv(DstMem | SrcReg | ModRM | Mov | PageTable, em_mov),
I2bv(DstReg | SrcMem | ModRM | Mov, em_mov),
I(DstMem | SrcNone | ModRM | Mov | PageTable, em_mov_rm_sreg),
D(ModRM | SrcMem | NoAccess | DstReg),
I(ImplicitOps | SrcMem16 | ModRM, em_mov_sreg_rm),
G(0, group1A),
/* 0x90 - 0x97 */
DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)),
/* 0x98 - 0x9F */
D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
I(SrcImmFAddr | No64, em_call_far), N,
II(ImplicitOps | Stack, em_pushf, pushf),
II(ImplicitOps | Stack, em_popf, popf),
I(ImplicitOps, em_sahf), I(ImplicitOps, em_lahf),
/* 0xA0 - 0xA7 */
I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
I2bv(SrcSI | DstDI | Mov | String, em_mov),
F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
/* 0xA8 - 0xAF */
F2bv(DstAcc | SrcImm | NoWrite, em_test),
I2bv(SrcAcc | DstDI | Mov | String, em_mov),
I2bv(SrcSI | DstAcc | Mov | String, em_mov),
F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
/* 0xB0 - 0xB7 */
X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
/* 0xB8 - 0xBF */
X8(I(DstReg | SrcImm64 | Mov, em_mov)),
/* 0xC0 - 0xC7 */
G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
I(ImplicitOps | Stack, em_ret),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
G(ByteOp, group11), G(0, group11),
/* 0xC8 - 0xCF */
I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
I(ImplicitOps | Stack | SrcImmU16, em_ret_far_imm),
I(ImplicitOps | Stack, em_ret_far),
D(ImplicitOps), DI(SrcImmByte, intn),
D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
/* 0xD0 - 0xD7 */
G(Src2One | ByteOp, group2), G(Src2One, group2),
G(Src2CL | ByteOp, group2), G(Src2CL, group2),
I(DstAcc | SrcImmUByte | No64, em_aam),
I(DstAcc | SrcImmUByte | No64, em_aad),
F(DstAcc | ByteOp | No64, em_salc),
I(DstAcc | SrcXLat | ByteOp, em_mov),
/* 0xD8 - 0xDF */
N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
/* 0xE0 - 0xE7 */
X3(I(SrcImmByte, em_loop)),
I(SrcImmByte, em_jcxz),
I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
/* 0xE8 - 0xEF */
I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
/* 0xF0 - 0xF7 */
N, DI(ImplicitOps, icebp), N, N,
DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
G(ByteOp, group3), G(0, group3),
/* 0xF8 - 0xFF */
D(ImplicitOps), D(ImplicitOps),
I(ImplicitOps, em_cli), I(ImplicitOps, em_sti),
D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
};
static const struct opcode twobyte_table[256] = {
/* 0x00 - 0x0F */
G(0, group6), GD(0, &group7), N, N,
N, I(ImplicitOps | EmulateOnUD, em_syscall),
II(ImplicitOps | Priv, em_clts, clts), N,
DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
/* 0x10 - 0x1F */
N, N, N, N, N, N, N, N,
D(ImplicitOps | ModRM | SrcMem | NoAccess),
N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
/* 0x20 - 0x2F */
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_cr_write, cr_write,
check_cr_write),
IIP(ModRM | SrcMem | Priv | Op3264 | NoMod, em_dr_write, dr_write,
check_dr_write),
N, N, N, N,
GP(ModRM | DstReg | SrcMem | Mov | Sse, &pfx_0f_28_0f_29),
GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_28_0f_29),
N, GP(ModRM | DstMem | SrcReg | Mov | Sse, &pfx_0f_2b),
N, N, N, N,
/* 0x30 - 0x3F */
II(ImplicitOps | Priv, em_wrmsr, wrmsr),
IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
II(ImplicitOps | Priv, em_rdmsr, rdmsr),
IIP(ImplicitOps, em_rdpmc, rdpmc, check_rdpmc),
I(ImplicitOps | EmulateOnUD, em_sysenter),
I(ImplicitOps | Priv | EmulateOnUD, em_sysexit),
N, N,
N, N, N, N, N, N, N, N,
/* 0x40 - 0x4F */
X16(D(DstReg | SrcMem | ModRM)),
/* 0x50 - 0x5F */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0x60 - 0x6F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x70 - 0x7F */
N, N, N, N,
N, N, N, N,
N, N, N, N,
N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
/* 0x80 - 0x8F */
X16(D(SrcImm)),
/* 0x90 - 0x9F */
X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
/* 0xA0 - 0xA7 */
I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
II(ImplicitOps, em_cpuid, cpuid),
F(DstMem | SrcReg | ModRM | BitOp | NoWrite, em_bt),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shld),
F(DstMem | SrcReg | Src2CL | ModRM, em_shld), N, N,
/* 0xA8 - 0xAF */
I(Stack | Src2GS, em_push_sreg), I(Stack | Src2GS, em_pop_sreg),
DI(ImplicitOps, rsm),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
/* 0xB0 - 0xB7 */
I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
F(DstMem | SrcReg | ModRM | BitOp | Lock, em_btr),
I(DstReg | SrcMemFAddr | ModRM | Src2FS, em_lseg),
I(DstReg | SrcMemFAddr | ModRM | Src2GS, em_lseg),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xB8 - 0xBF */
N, N,
G(BitOp, group8),
F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
F(DstReg | SrcMem | ModRM, em_bsf), F(DstReg | SrcMem | ModRM, em_bsr),
D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
/* 0xC0 - 0xC7 */
F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
N, D(DstMem | SrcReg | ModRM | Mov),
N, N, N, GD(0, &group9),
/* 0xC8 - 0xCF */
X8(I(DstReg, em_bswap)),
/* 0xD0 - 0xDF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
/* 0xE0 - 0xEF */
N, N, N, N, N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_e7),
N, N, N, N, N, N, N, N,
/* 0xF0 - 0xFF */
N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
};
static const struct gprefix three_byte_0f_38_f0 = {
I(DstReg | SrcMem | Mov, em_movbe), N, N, N
};
static const struct gprefix three_byte_0f_38_f1 = {
I(DstMem | SrcReg | Mov, em_movbe), N, N, N
};
/*
 * The instructions below are selected by the prefix, which is indexed
 * by the third opcode byte.
 */
static const struct opcode opcode_map_0f_38[256] = {
/* 0x00 - 0x7f */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0x80 - 0xef */
X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
/* 0xf0 - 0xf1 */
GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
/* 0xf2 - 0xff */
N, N, X4(N), X8(N)
};
#undef D
#undef N
#undef G
#undef GD
#undef I
#undef GP
#undef EXT
#undef D2bv
#undef D2bvIP
#undef I2bv
#undef I2bvIP
#undef F6ALU
static unsigned imm_size(struct x86_emulate_ctxt *ctxt)
{
unsigned size;
size = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
if (size == 8)
size = 4;
return size;
}
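/*
 * Fetch an immediate operand of the given size from the instruction
 * stream. Immediates are fetched sign-extended and masked back down to
 * the operand size when the caller asked for zero extension.
 */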
static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned size, bool sign_extension)
{
int rc = X86EMUL_CONTINUE;
op->type = OP_IMM;
op->bytes = size;
op->addr.mem.ea = ctxt->_eip;
/* NB. Immediates are sign-extended as necessary. */
switch (op->bytes) {
case 1:
op->val = insn_fetch(s8, ctxt);
break;
case 2:
op->val = insn_fetch(s16, ctxt);
break;
case 4:
op->val = insn_fetch(s32, ctxt);
break;
case 8:
op->val = insn_fetch(s64, ctxt);
break;
}
if (!sign_extension) {
switch (op->bytes) {
case 1:
op->val &= 0xff;
break;
case 2:
op->val &= 0xffff;
break;
case 4:
op->val &= 0xffffffff;
break;
}
}
done:
return rc;
}
static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
unsigned d)
{
int rc = X86EMUL_CONTINUE;
switch (d) {
case OpReg:
decode_register_operand(ctxt, op);
break;
case OpImmUByte:
rc = decode_imm(ctxt, op, 1, false);
break;
case OpMem:
ctxt->memop.bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
mem_common:
*op = ctxt->memop;
ctxt->memopp = op;
if (ctxt->d & BitOp)
fetch_bit_operand(ctxt);
op->orig_val = op->val;
break;
case OpMem64:
ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
goto mem_common;
case OpAcc:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccLo:
op->type = OP_REG;
op->bytes = (ctxt->d & ByteOp) ? 2 : ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpAccHi:
if (ctxt->d & ByteOp) {
op->type = OP_NONE;
break;
}
op->type = OP_REG;
op->bytes = ctxt->op_bytes;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
op->orig_val = op->val;
break;
case OpDI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
op->addr.mem.seg = VCPU_SREG_ES;
op->val = 0;
op->count = 1;
break;
case OpDX:
op->type = OP_REG;
op->bytes = 2;
op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
fetch_register_operand(op);
break;
case OpCL:
op->bytes = 1;
op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
break;
case OpImmByte:
rc = decode_imm(ctxt, op, 1, true);
break;
case OpOne:
op->bytes = 1;
op->val = 1;
break;
case OpImm:
rc = decode_imm(ctxt, op, imm_size(ctxt), true);
break;
case OpImm64:
rc = decode_imm(ctxt, op, ctxt->op_bytes, true);
break;
case OpMem8:
ctxt->memop.bytes = 1;
if (ctxt->memop.type == OP_REG) {
ctxt->memop.addr.reg = decode_register(ctxt,
ctxt->modrm_rm, true);
fetch_register_operand(&ctxt->memop);
}
goto mem_common;
case OpMem16:
ctxt->memop.bytes = 2;
goto mem_common;
case OpMem32:
ctxt->memop.bytes = 4;
goto mem_common;
case OpImmU16:
rc = decode_imm(ctxt, op, 2, false);
break;
case OpImmU:
rc = decode_imm(ctxt, op, imm_size(ctxt), false);
break;
case OpSI:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
op->count = 1;
break;
case OpXLat:
op->type = OP_MEM;
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
op->addr.mem.ea =
register_address(ctxt,
reg_read(ctxt, VCPU_REGS_RBX) +
(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
op->addr.mem.seg = ctxt->seg_override;
op->val = 0;
break;
case OpImmFAddr:
op->type = OP_IMM;
op->addr.mem.ea = ctxt->_eip;
op->bytes = ctxt->op_bytes + 2;
insn_fetch_arr(op->valptr, op->bytes, ctxt);
break;
case OpMemFAddr:
ctxt->memop.bytes = ctxt->op_bytes + 2;
goto mem_common;
case OpES:
op->val = VCPU_SREG_ES;
break;
case OpCS:
op->val = VCPU_SREG_CS;
break;
case OpSS:
op->val = VCPU_SREG_SS;
break;
case OpDS:
op->val = VCPU_SREG_DS;
break;
case OpFS:
op->val = VCPU_SREG_FS;
break;
case OpGS:
op->val = VCPU_SREG_GS;
break;
case OpImplicit:
/* Special instructions do their own operand decoding. */
default:
op->type = OP_NONE; /* Disable writeback. */
break;
}
done:
return rc;
}
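/*
 * Top-level decoder: consume legacy and REX prefixes, fetch one to
 * three opcode bytes, resolve group/group-dual/RM-extension/prefix/
 * escape entries against ModRM, then fetch the ModRM/SIB bytes and
 * decode the operands as directed by the opcode flags.
 */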
int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
{
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
ctxt->_eip = ctxt->eip;
ctxt->fetch.ptr = ctxt->fetch.data;
ctxt->fetch.end = ctxt->fetch.data + insn_len;
ctxt->opcode_len = 1;
if (insn_len > 0)
memcpy(ctxt->fetch.data, insn, insn_len);
else {
rc = __do_insn_fetch_bytes(ctxt, 1);
if (rc != X86EMUL_CONTINUE)
return rc;
}
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return EMULATION_FAILED;
}
ctxt->op_bytes = def_op_bytes;
ctxt->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (ctxt->b = insn_fetch(u8, ctxt)) {
case 0x66: /* operand-size override */
op_prefix = true;
/* switch between 2/4 bytes */
ctxt->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
ctxt->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
has_seg_override = true;
ctxt->seg_override = (ctxt->b >> 3) & 3;
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
has_seg_override = true;
ctxt->seg_override = ctxt->b & 7;
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
ctxt->rex_prefix = ctxt->b;
continue;
case 0xf0: /* LOCK */
ctxt->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
case 0xf3: /* REP/REPE/REPZ */
ctxt->rep_prefix = ctxt->b;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
ctxt->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (ctxt->rex_prefix & 8)
ctxt->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[ctxt->b];
/* Two-byte opcode? */
if (ctxt->b == 0x0f) {
ctxt->opcode_len = 2;
ctxt->b = insn_fetch(u8, ctxt);
opcode = twobyte_table[ctxt->b];
/* 0F_38 opcode map */
if (ctxt->b == 0x38) {
ctxt->opcode_len = 3;
ctxt->b = insn_fetch(u8, ctxt);
opcode = opcode_map_0f_38[ctxt->b];
}
}
ctxt->d = opcode.flags;
if (ctxt->d & ModRM)
ctxt->modrm = insn_fetch(u8, ctxt);
/* vex-prefix instructions are not implemented */
if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
(mode == X86EMUL_MODE_PROT64 ||
(mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
ctxt->d = NotImpl;
}
while (ctxt->d & GroupMask) {
switch (ctxt->d & GroupMask) {
case Group:
goffset = (ctxt->modrm >> 3) & 7;
opcode = opcode.u.group[goffset];
break;
case GroupDual:
goffset = (ctxt->modrm >> 3) & 7;
if ((ctxt->modrm >> 6) == 3)
opcode = opcode.u.gdual->mod3[goffset];
else
opcode = opcode.u.gdual->mod012[goffset];
break;
case RMExt:
goffset = ctxt->modrm & 7;
opcode = opcode.u.group[goffset];
break;
case Prefix:
if (ctxt->rep_prefix && op_prefix)
return EMULATION_FAILED;
simd_prefix = op_prefix ? 0x66 : ctxt->rep_prefix;
switch (simd_prefix) {
case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
}
break;
case Escape:
if (ctxt->modrm > 0xbf)
opcode = opcode.u.esc->high[ctxt->modrm - 0xc0];
else
opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
break;
default:
return EMULATION_FAILED;
}
ctxt->d &= ~(u64)GroupMask;
ctxt->d |= opcode.flags;
}
/* Unrecognised? */
if (ctxt->d == 0)
return EMULATION_FAILED;
ctxt->execute = opcode.u.execute;
if (unlikely(ctxt->ud) && likely(!(ctxt->d & EmulateOnUD)))
return EMULATION_FAILED;
if (unlikely(ctxt->d &
(NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
/*
* These are copied unconditionally here, and checked unconditionally
* in x86_emulate_insn.
*/
ctxt->check_perm = opcode.check_perm;
ctxt->intercept = opcode.intercept;
if (ctxt->d & NotImpl)
return EMULATION_FAILED;
if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
ctxt->op_bytes = 8;
if (ctxt->d & Op3264) {
if (mode == X86EMUL_MODE_PROT64)
ctxt->op_bytes = 8;
else
ctxt->op_bytes = 4;
}
if (ctxt->d & Sse)
ctxt->op_bytes = 16;
else if (ctxt->d & Mmx)
ctxt->op_bytes = 8;
}
/* ModRM and SIB bytes. */
if (ctxt->d & ModRM) {
rc = decode_modrm(ctxt, &ctxt->memop);
if (!has_seg_override) {
has_seg_override = true;
ctxt->seg_override = ctxt->modrm_seg;
}
} else if (ctxt->d & MemAbs)
rc = decode_abs(ctxt, &ctxt->memop);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!has_seg_override)
ctxt->seg_override = VCPU_SREG_DS;
ctxt->memop.addr.mem.seg = ctxt->seg_override;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src, (ctxt->d >> SrcShift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
rc = decode_operand(ctxt, &ctxt->src2, (ctxt->d >> Src2Shift) & OpMask);
if (rc != X86EMUL_CONTINUE)
goto done;
/* Decode and fetch the destination operand: register or memory. */
rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
if (ctxt->rip_relative)
ctxt->memopp->addr.mem.ea += ctxt->_eip;
done:
return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
}
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt)
{
return ctxt->d & PageTable;
}
static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
{
/* The second termination condition only applies for REPE
* and REPNE. Test if the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
* corresponding termination condition according to:
* - if REPE/REPZ and ZF = 0 then done
* - if REPNE/REPNZ and ZF = 1 then done
*/
if (((ctxt->b == 0xa6) || (ctxt->b == 0xa7) ||
(ctxt->b == 0xae) || (ctxt->b == 0xaf))
&& (((ctxt->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0))
|| ((ctxt->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF))))
return true;
return false;
}
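/*
 * Example: REPE CMPSB (opcode 0xa6) is reported complete as soon as a
 * comparison clears ZF, in addition to the RCX == 0 check performed
 * separately in x86_emulate_insn().
 */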
static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
{
bool fault = false;
ctxt->ops->get_fpu(ctxt);
asm volatile("1: fwait \n\t"
"2: \n\t"
".pushsection .fixup,\"ax\" \n\t"
"3: \n\t"
"movb $1, %[fault] \n\t"
"jmp 2b \n\t"
".popsection \n\t"
_ASM_EXTABLE(1b, 3b)
: [fault]"+qm"(fault));
ctxt->ops->put_fpu(ctxt);
if (unlikely(fault))
return emulate_exception(ctxt, MF_VECTOR, 0, false);
return X86EMUL_CONTINUE;
}
static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
struct operand *op)
{
if (op->type == OP_MM)
read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
}
static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
{
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
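/*
 * Fastop handlers are laid out as FASTOP_SIZE-aligned entry points,
 * one per operand size. For non-byte ops the entry is selected by the
 * destination width: e.g. dst.bytes == 4 gives __ffs(4) == 2, so fop
 * advances by 2 * FASTOP_SIZE to the 32-bit variant. The asm below
 * loads the guest flags, calls the handler, and captures the resulting
 * flags back into ctxt->eflags.
 */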
if (!(ctxt->d & ByteOp))
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
asm("push %[flags]; popf; call *%[fastop]; pushf; pop %[flags]\n"
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
[fastop]"+S"(fop)
: "c"(ctxt->src2.val));
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
if (!fop) /* exception is returned in fop variable */
return emulate_de(ctxt);
return X86EMUL_CONTINUE;
}
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
{
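/*
 * Zero the per-instruction decode state in one go: everything from
 * rip_relative up to (but not including) modrm. This relies on the
 * field ordering in struct x86_emulate_ctxt.
 */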
memset(&ctxt->rip_relative, 0,
(void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
ctxt->io_read.pos = 0;
ctxt->io_read.end = 0;
ctxt->mem_read.end = 0;
}
int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
ctxt->mem_read.pos = 0;
/* LOCK prefix is allowed only with some instructions */
if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & SrcMask) == SrcMemFAddr && ctxt->src.type != OP_MEM) {
rc = emulate_ud(ctxt);
goto done;
}
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
(ctxt->d & Undefined)) {
rc = emulate_ud(ctxt);
goto done;
}
if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
|| ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
rc = emulate_ud(ctxt);
goto done;
}
if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
rc = emulate_nm(ctxt);
goto done;
}
if (ctxt->d & Mmx) {
rc = flush_pending_x87_faults(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
/*
* Now that we know the fpu is exception safe, we can fetch
* operands from it.
*/
fetch_possible_mmx_operand(ctxt, &ctxt->src);
fetch_possible_mmx_operand(ctxt, &ctxt->src2);
if (!(ctxt->d & Mov))
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/* Privileged instruction can be executed only in CPL=0 */
if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
if (ctxt->d & PrivUD)
rc = emulate_ud(ctxt);
else
rc = emulate_gp(ctxt, 0);
goto done;
}
/* Instruction can only be executed in protected mode */
if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
rc = emulate_ud(ctxt);
goto done;
}
/* Do instruction specific permission checks */
if (ctxt->d & CheckPerm) {
rc = ctxt->check_perm(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String)) {
/* All REP prefixes have the same first termination condition */
if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
ctxt->eip = ctxt->_eip;
ctxt->eflags &= ~EFLG_RF;
goto done;
}
}
}
if ((ctxt->src.type == OP_MEM) && !(ctxt->d & NoAccess)) {
rc = segmented_read(ctxt, ctxt->src.addr.mem,
ctxt->src.valptr, ctxt->src.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
ctxt->src.orig_val64 = ctxt->src.val64;
}
if (ctxt->src2.type == OP_MEM) {
rc = segmented_read(ctxt, ctxt->src2.addr.mem,
&ctxt->src2.val, ctxt->src2.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if ((ctxt->d & DstMask) == ImplicitOps)
goto special_insn;
if ((ctxt->dst.type == OP_MEM) && !(ctxt->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */
rc = segmented_read(ctxt, ctxt->dst.addr.mem,
&ctxt->dst.val, ctxt->dst.bytes);
if (rc != X86EMUL_CONTINUE)
goto done;
}
ctxt->dst.orig_val = ctxt->dst.val;
special_insn:
if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (ctxt->rep_prefix && (ctxt->d & String))
ctxt->eflags |= EFLG_RF;
else
ctxt->eflags &= ~EFLG_RF;
if (ctxt->execute) {
if (ctxt->d & Fastop) {
void (*fop)(struct fastop *) = (void *)ctxt->execute;
rc = fastop(ctxt, fop);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
}
if (ctxt->opcode_len == 2)
goto twobyte_insn;
else if (ctxt->opcode_len == 3)
goto threebyte_insn;
switch (ctxt->b) {
case 0x63: /* movsxd */
if (ctxt->mode != X86EMUL_MODE_PROT64)
goto cannot_emulate;
ctxt->dst.val = (s32) ctxt->src.val;
break;
case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea;
break;
case 0x90 ... 0x97: /* nop / xchg reg, rax */
if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
ctxt->dst.type = OP_NONE;
else
rc = em_xchg(ctxt);
break;
case 0x98: /* cbw/cwde/cdqe */
switch (ctxt->op_bytes) {
case 2: ctxt->dst.val = (s8)ctxt->dst.val; break;
case 4: ctxt->dst.val = (s16)ctxt->dst.val; break;
case 8: ctxt->dst.val = (s32)ctxt->dst.val; break;
}
break;
case 0xcc: /* int3 */
rc = emulate_int(ctxt, 3);
break;
case 0xcd: /* int n */
rc = emulate_int(ctxt, ctxt->src.val);
break;
case 0xce: /* into */
if (ctxt->eflags & EFLG_OF)
rc = emulate_int(ctxt, 4);
break;
case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */
rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */
break;
case 0xf4: /* hlt */
ctxt->ops->halt(ctxt);
break;
case 0xf5: /* cmc */
/* complement carry flag from eflags reg */
ctxt->eflags ^= EFLG_CF;
break;
case 0xf8: /* clc */
ctxt->eflags &= ~EFLG_CF;
break;
case 0xf9: /* stc */
ctxt->eflags |= EFLG_CF;
break;
case 0xfc: /* cld */
ctxt->eflags &= ~EFLG_DF;
break;
case 0xfd: /* std */
ctxt->eflags |= EFLG_DF;
break;
default:
goto cannot_emulate;
}
if (rc != X86EMUL_CONTINUE)
goto done;
writeback:
if (ctxt->d & SrcWrite) {
BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
rc = writeback(ctxt, &ctxt->src);
if (rc != X86EMUL_CONTINUE)
goto done;
}
if (!(ctxt->d & NoWrite)) {
rc = writeback(ctxt, &ctxt->dst);
if (rc != X86EMUL_CONTINUE)
goto done;
}
/*
 * Restore dst type in case the decoding will be reused
 * (happens for string instructions).
 */
ctxt->dst.type = saved_dst_type;
if ((ctxt->d & SrcMask) == SrcSI)
string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
if ((ctxt->d & DstMask) == DstDI)
string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
if (ctxt->rep_prefix && (ctxt->d & String)) {
unsigned int count;
struct read_cache *r = &ctxt->io_read;
if ((ctxt->d & SrcMask) == SrcSI)
count = ctxt->src.count;
else
count = ctxt->dst.count;
register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
-count);
if (!string_insn_completed(ctxt)) {
/*
* Re-enter the guest when the pio read-ahead buffer is empty
* or, if it is not used, after every 1024 iterations.
*/
if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
(r->end == 0 || r->end != r->pos)) {
/*
* Reset the read cache. This usually happens before
* decode, but since the instruction is restarted
* we have to do it here.
*/
ctxt->mem_read.end = 0;
writeback_registers(ctxt);
return EMULATION_RESTART;
}
goto done; /* skip rip writeback */
}
ctxt->eflags &= ~EFLG_RF;
}
ctxt->eip = ctxt->_eip;
done:
if (rc == X86EMUL_PROPAGATE_FAULT) {
WARN_ON(ctxt->exception.vector > 0x1f);
ctxt->have_exception = true;
}
if (rc == X86EMUL_INTERCEPTED)
return EMULATION_INTERCEPTED;
if (rc == X86EMUL_CONTINUE)
writeback_registers(ctxt);
return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
twobyte_insn:
switch (ctxt->b) {
case 0x09: /* wbinvd */
(ctxt->ops->wbinvd)(ctxt);
break;
case 0x08: /* invd */
case 0x0d: /* GrpP (prefetch) */
case 0x18: /* Grp16 (prefetch/nop) */
case 0x1f: /* nop */
break;
case 0x20: /* mov cr, reg */
ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg);
break;
case 0x21: /* mov from dr to reg */
ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
break;
case 0x40 ... 0x4f: /* cmov */
if (test_cc(ctxt->b, ctxt->eflags))
ctxt->dst.val = ctxt->src.val;
else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
ctxt->op_bytes != 4)
ctxt->dst.type = OP_NONE; /* no writeback */
break;
case 0x80 ... 0x8f: /* jnz rel, etc. */
if (test_cc(ctxt->b, ctxt->eflags))
rc = jmp_rel(ctxt, ctxt->src.val);
break;
case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
break;
case 0xb6 ... 0xb7: /* movzx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
: (u16) ctxt->src.val;
break;
case 0xbe ... 0xbf: /* movsx */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
(s16) ctxt->src.val;
break;
case 0xc3: /* movnti */
ctxt->dst.bytes = ctxt->op_bytes;
ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
(u32) ctxt->src.val;
break;
default:
goto cannot_emulate;
}
threebyte_insn:
if (rc != X86EMUL_CONTINUE)
goto done;
goto writeback;
cannot_emulate:
return EMULATION_FAILED;
}
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
{
invalidate_registers(ctxt);
}
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
{
writeback_registers(ctxt);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_2321_0 |
crossvul-cpp_data_bad_5671_0 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Routing netlink socket interface: protocol independent part.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Vitaly E. Lavrov RTA_OK arithmetics was wrong.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/security.h>
#include <linux/mutex.h>
#include <linux/if_addr.h>
#include <linux/if_bridge.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/udp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/fib_rules.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
struct rtnl_link {
rtnl_doit_func doit;
rtnl_dumpit_func dumpit;
rtnl_calcit_func calcit;
};
static DEFINE_MUTEX(rtnl_mutex);
void rtnl_lock(void)
{
mutex_lock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_lock);
void __rtnl_unlock(void)
{
mutex_unlock(&rtnl_mutex);
}
void rtnl_unlock(void)
{
/* This fellow will unlock it for us. */
netdev_run_todo();
}
EXPORT_SYMBOL(rtnl_unlock);
int rtnl_trylock(void)
{
return mutex_trylock(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_trylock);
int rtnl_is_locked(void)
{
return mutex_is_locked(&rtnl_mutex);
}
EXPORT_SYMBOL(rtnl_is_locked);
#ifdef CONFIG_PROVE_LOCKING
int lockdep_rtnl_is_held(void)
{
return lockdep_is_held(&rtnl_mutex);
}
EXPORT_SYMBOL(lockdep_rtnl_is_held);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
static struct rtnl_link *rtnl_msg_handlers[RTNL_FAMILY_MAX + 1];
static inline int rtm_msgindex(int msgtype)
{
int msgindex = msgtype - RTM_BASE;
/*
* msgindex < 0 implies someone tried to register a netlink
* control code. msgindex >= RTM_NR_MSGTYPES may indicate that
* the message type has not been added to linux/rtnetlink.h
*/
BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
return msgindex;
}
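/*
 * Example: RTM_NEWLINK equals RTM_BASE, so rtm_msgindex(RTM_NEWLINK)
 * is 0 and rtm_msgindex(RTM_GETLINK) is 2, matching the slots of the
 * per-protocol handler tables below.
 */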
static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].doit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab[msgindex].doit;
}
static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].dumpit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab[msgindex].dumpit;
}
static rtnl_calcit_func rtnl_get_calcit(int protocol, int msgindex)
{
struct rtnl_link *tab;
if (protocol <= RTNL_FAMILY_MAX)
tab = rtnl_msg_handlers[protocol];
else
tab = NULL;
if (tab == NULL || tab[msgindex].calcit == NULL)
tab = rtnl_msg_handlers[PF_UNSPEC];
return tab[msgindex].calcit;
}
/**
* __rtnl_register - Register a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
* @doit: Function pointer called for each request message
* @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
* @calcit: Function pointer to calc size of dump message
*
* Registers the specified function pointers (at least one of them has
* to be non-NULL) to be called whenever a request message for the
* specified protocol family and message type is received.
*
* The special protocol family PF_UNSPEC may be used to define fallback
* function pointers for the case when no entry for the specific protocol
* family exists.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit,
rtnl_calcit_func calcit)
{
struct rtnl_link *tab;
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
tab = rtnl_msg_handlers[protocol];
if (tab == NULL) {
tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
if (tab == NULL)
return -ENOBUFS;
rtnl_msg_handlers[protocol] = tab;
}
if (doit)
tab[msgindex].doit = doit;
if (dumpit)
tab[msgindex].dumpit = dumpit;
if (calcit)
tab[msgindex].calcit = calcit;
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_register);
/**
* rtnl_register - Register a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
* @doit: Function pointer called for each request message
* @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
* @calcit: Function pointer to calc size of dump message
*
* Identical to __rtnl_register() but panics on failure. This is useful
* as failure of this function is very unlikely, it can only happen due
* to lack of memory when allocating the chain to store all message
* handlers for a protocol. Meant for use in init functions where lack
* of memory implies no sense in continuing.
*/
void rtnl_register(int protocol, int msgtype,
rtnl_doit_func doit, rtnl_dumpit_func dumpit,
rtnl_calcit_func calcit)
{
if (__rtnl_register(protocol, msgtype, doit, dumpit, calcit) < 0)
panic("Unable to register rtnetlink message handler, "
"protocol = %d, message type = %d\n",
protocol, msgtype);
}
EXPORT_SYMBOL_GPL(rtnl_register);
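/*
 * Usage sketch (illustrative, not part of this file): a protocol
 * module typically registers its handlers from its init path, e.g.
 *
 *	rtnl_register(PF_INET, RTM_GETROUTE, my_rtm_getroute,
 *		      my_dump_routes, NULL);
 *
 * my_rtm_getroute and my_dump_routes are hypothetical stand-ins for
 * the caller's own rtnl_doit_func and rtnl_dumpit_func implementations.
 */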
/**
* rtnl_unregister - Unregister a rtnetlink message type
* @protocol: Protocol family or PF_UNSPEC
* @msgtype: rtnetlink message type
*
* Returns 0 on success or a negative error code.
*/
int rtnl_unregister(int protocol, int msgtype)
{
int msgindex;
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
msgindex = rtm_msgindex(msgtype);
if (rtnl_msg_handlers[protocol] == NULL)
return -ENOENT;
rtnl_msg_handlers[protocol][msgindex].doit = NULL;
rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(rtnl_unregister);
/**
* rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
* @protocol: Protocol family or PF_UNSPEC
*
* Identical to calling rtnl_unregister() for all registered message types
* of a certain protocol family.
*/
void rtnl_unregister_all(int protocol)
{
BUG_ON(protocol < 0 || protocol > RTNL_FAMILY_MAX);
kfree(rtnl_msg_handlers[protocol]);
rtnl_msg_handlers[protocol] = NULL;
}
EXPORT_SYMBOL_GPL(rtnl_unregister_all);
static LIST_HEAD(link_ops);
static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
{
const struct rtnl_link_ops *ops;
list_for_each_entry(ops, &link_ops, list) {
if (!strcmp(ops->kind, kind))
return ops;
}
return NULL;
}
/**
* __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
* @ops: struct rtnl_link_ops * to register
*
* The caller must hold the rtnl_mutex. This function should be used
* by drivers that create devices during module initialization. It
* must be called before registering the devices.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_link_register(struct rtnl_link_ops *ops)
{
if (rtnl_link_ops_get(ops->kind))
return -EEXIST;
if (!ops->dellink)
ops->dellink = unregister_netdevice_queue;
list_add_tail(&ops->list, &link_ops);
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_link_register);
/**
* rtnl_link_register - Register rtnl_link_ops with rtnetlink.
* @ops: struct rtnl_link_ops * to register
*
* Returns 0 on success or a negative error code.
*/
int rtnl_link_register(struct rtnl_link_ops *ops)
{
int err;
rtnl_lock();
err = __rtnl_link_register(ops);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(rtnl_link_register);
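/*
 * Usage sketch (illustrative): a virtual-device driver fills in a
 * struct rtnl_link_ops and registers it at module init. The names
 * "mydev", my_priv and my_setup are hypothetical placeholders:
 *
 *	static struct rtnl_link_ops mydev_link_ops __read_mostly = {
 *		.kind		= "mydev",
 *		.priv_size	= sizeof(struct my_priv),
 *		.setup		= my_setup,
 *	};
 *
 *	err = rtnl_link_register(&mydev_link_ops);
 *
 * With no .dellink given, __rtnl_link_register() defaults it to
 * unregister_netdevice_queue().
 */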
static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
{
struct net_device *dev;
LIST_HEAD(list_kill);
for_each_netdev(net, dev) {
if (dev->rtnl_link_ops == ops)
ops->dellink(dev, &list_kill);
}
unregister_netdevice_many(&list_kill);
}
/**
* __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
* @ops: struct rtnl_link_ops * to unregister
*
* The caller must hold the rtnl_mutex.
*/
void __rtnl_link_unregister(struct rtnl_link_ops *ops)
{
struct net *net;
for_each_net(net) {
__rtnl_kill_links(net, ops);
}
list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
/**
* rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
* @ops: struct rtnl_link_ops * to unregister
*/
void rtnl_link_unregister(struct rtnl_link_ops *ops)
{
rtnl_lock();
__rtnl_link_unregister(ops);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_link_unregister);
static size_t rtnl_link_get_size(const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
size_t size;
if (!ops)
return 0;
size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
if (ops->get_size)
/* IFLA_INFO_DATA + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
ops->get_size(dev);
if (ops->get_xstats_size)
/* IFLA_INFO_XSTATS */
size += nla_total_size(ops->get_xstats_size(dev));
return size;
}
static LIST_HEAD(rtnl_af_ops);
static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
{
const struct rtnl_af_ops *ops;
list_for_each_entry(ops, &rtnl_af_ops, list) {
if (ops->family == family)
return ops;
}
return NULL;
}
/**
* __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* The caller must hold the rtnl_mutex.
*
* Returns 0 on success or a negative error code.
*/
int __rtnl_af_register(struct rtnl_af_ops *ops)
{
list_add_tail(&ops->list, &rtnl_af_ops);
return 0;
}
EXPORT_SYMBOL_GPL(__rtnl_af_register);
/**
* rtnl_af_register - Register rtnl_af_ops with rtnetlink.
* @ops: struct rtnl_af_ops * to register
*
* Returns 0 on success or a negative error code.
*/
int rtnl_af_register(struct rtnl_af_ops *ops)
{
int err;
rtnl_lock();
err = __rtnl_af_register(ops);
rtnl_unlock();
return err;
}
EXPORT_SYMBOL_GPL(rtnl_af_register);
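/*
 * Usage sketch (illustrative): an address family registers per-AF
 * callbacks so its link attributes appear under IFLA_AF_SPEC. The
 * my_* names are hypothetical placeholders:
 *
 *	static struct rtnl_af_ops my_af_ops = {
 *		.family		  = AF_INET6,
 *		.fill_link_af	  = my_fill_link_af,
 *		.get_link_af_size = my_get_link_af_size,
 *		.set_link_af	  = my_set_link_af,
 *	};
 *
 *	rtnl_af_register(&my_af_ops);
 */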
/**
* __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
* @ops: struct rtnl_af_ops * to unregister
*
* The caller must hold the rtnl_mutex.
*/
void __rtnl_af_unregister(struct rtnl_af_ops *ops)
{
list_del(&ops->list);
}
EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
/**
* rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
* @ops: struct rtnl_af_ops * to unregister
*/
void rtnl_af_unregister(struct rtnl_af_ops *ops)
{
rtnl_lock();
__rtnl_af_unregister(ops);
rtnl_unlock();
}
EXPORT_SYMBOL_GPL(rtnl_af_unregister);
static size_t rtnl_link_get_af_size(const struct net_device *dev)
{
struct rtnl_af_ops *af_ops;
size_t size;
/* IFLA_AF_SPEC */
size = nla_total_size(sizeof(struct nlattr));
list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->get_link_af_size) {
/* AF_* + nested data */
size += nla_total_size(sizeof(struct nlattr)) +
af_ops->get_link_af_size(dev);
}
}
return size;
}
static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
{
const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
struct nlattr *linkinfo, *data;
int err = -EMSGSIZE;
linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
if (linkinfo == NULL)
goto out;
if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
goto err_cancel_link;
if (ops->fill_xstats) {
err = ops->fill_xstats(skb, dev);
if (err < 0)
goto err_cancel_link;
}
if (ops->fill_info) {
data = nla_nest_start(skb, IFLA_INFO_DATA);
if (data == NULL)
goto err_cancel_link;
err = ops->fill_info(skb, dev);
if (err < 0)
goto err_cancel_data;
nla_nest_end(skb, data);
}
nla_nest_end(skb, linkinfo);
return 0;
err_cancel_data:
nla_nest_cancel(skb, data);
err_cancel_link:
nla_nest_cancel(skb, linkinfo);
out:
return err;
}
static const int rtm_min[RTM_NR_FAMILIES] =
{
[RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
[RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
[RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)),
[RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
[RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
[RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)),
[RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
[RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
};
static const int rta_max[RTM_NR_FAMILIES] =
{
[RTM_FAM(RTM_NEWLINK)] = IFLA_MAX,
[RTM_FAM(RTM_NEWADDR)] = IFA_MAX,
[RTM_FAM(RTM_NEWROUTE)] = RTA_MAX,
[RTM_FAM(RTM_NEWRULE)] = FRA_MAX,
[RTM_FAM(RTM_NEWQDISC)] = TCA_MAX,
[RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
[RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
[RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
};
int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int group, int echo)
{
struct sock *rtnl = net->rtnl;
int err = 0;
NETLINK_CB(skb).dst_group = group;
if (echo)
atomic_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
if (echo)
err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
return err;
}
int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
{
struct sock *rtnl = net->rtnl;
return nlmsg_unicast(rtnl, skb, pid);
}
EXPORT_SYMBOL(rtnl_unicast);
void rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
struct nlmsghdr *nlh, gfp_t flags)
{
struct sock *rtnl = net->rtnl;
int report = 0;
if (nlh)
report = nlmsg_report(nlh);
nlmsg_notify(rtnl, skb, pid, group, report, flags);
}
EXPORT_SYMBOL(rtnl_notify);
void rtnl_set_sk_err(struct net *net, u32 group, int error)
{
struct sock *rtnl = net->rtnl;
netlink_set_err(rtnl, 0, group, error);
}
EXPORT_SYMBOL(rtnl_set_sk_err);
int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
{
struct nlattr *mx;
int i, valid = 0;
mx = nla_nest_start(skb, RTA_METRICS);
if (mx == NULL)
return -ENOBUFS;
for (i = 0; i < RTAX_MAX; i++) {
if (metrics[i]) {
valid++;
if (nla_put_u32(skb, i+1, metrics[i]))
goto nla_put_failure;
}
}
if (!valid) {
nla_nest_cancel(skb, mx);
return 0;
}
return nla_nest_end(skb, mx);
nla_put_failure:
nla_nest_cancel(skb, mx);
return -EMSGSIZE;
}
EXPORT_SYMBOL(rtnetlink_put_metrics);
int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
long expires, u32 error)
{
struct rta_cacheinfo ci = {
.rta_lastuse = jiffies_delta_to_clock_t(jiffies - dst->lastuse),
.rta_used = dst->__use,
.rta_clntref = atomic_read(&(dst->__refcnt)),
.rta_error = error,
.rta_id = id,
};
if (expires) {
unsigned long clock;
clock = jiffies_to_clock_t(abs(expires));
clock = min_t(unsigned long, clock, INT_MAX);
ci.rta_expires = (expires > 0) ? clock : -clock;
}
return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
}
EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
static void set_operstate(struct net_device *dev, unsigned char transition)
{
unsigned char operstate = dev->operstate;
switch (transition) {
case IF_OPER_UP:
if ((operstate == IF_OPER_DORMANT ||
operstate == IF_OPER_UNKNOWN) &&
!netif_dormant(dev))
operstate = IF_OPER_UP;
break;
case IF_OPER_DORMANT:
if (operstate == IF_OPER_UP ||
operstate == IF_OPER_UNKNOWN)
operstate = IF_OPER_DORMANT;
break;
}
if (dev->operstate != operstate) {
write_lock_bh(&dev_base_lock);
dev->operstate = operstate;
write_unlock_bh(&dev_base_lock);
netdev_state_change(dev);
}
}
static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
{
return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
(dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
}
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
unsigned int flags = ifm->ifi_flags;
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
(rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
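/*
 * Example: with ifi_change == IFF_UP and ifi_flags == IFF_UP, only the
 * IFF_UP bit is taken from the request and every other flag keeps its
 * current device value; ifi_change == 0 applies the request verbatim.
 */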
static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
const struct rtnl_link_stats64 *b)
{
a->rx_packets = b->rx_packets;
a->tx_packets = b->tx_packets;
a->rx_bytes = b->rx_bytes;
a->tx_bytes = b->tx_bytes;
a->rx_errors = b->rx_errors;
a->tx_errors = b->tx_errors;
a->rx_dropped = b->rx_dropped;
a->tx_dropped = b->tx_dropped;
a->multicast = b->multicast;
a->collisions = b->collisions;
a->rx_length_errors = b->rx_length_errors;
a->rx_over_errors = b->rx_over_errors;
a->rx_crc_errors = b->rx_crc_errors;
a->rx_frame_errors = b->rx_frame_errors;
a->rx_fifo_errors = b->rx_fifo_errors;
a->rx_missed_errors = b->rx_missed_errors;
a->tx_aborted_errors = b->tx_aborted_errors;
a->tx_carrier_errors = b->tx_carrier_errors;
a->tx_fifo_errors = b->tx_fifo_errors;
a->tx_heartbeat_errors = b->tx_heartbeat_errors;
a->tx_window_errors = b->tx_window_errors;
a->rx_compressed = b->rx_compressed;
a->tx_compressed = b->tx_compressed;
}
static void copy_rtnl_link_stats64(void *v, const struct rtnl_link_stats64 *b)
{
memcpy(v, b, sizeof(*b));
}
/* All VF info */
static inline int rtnl_vfinfo_size(const struct net_device *dev,
u32 ext_filter_mask)
{
if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
(ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
size_t size = nla_total_size(sizeof(struct nlattr));
size += nla_total_size(num_vfs * sizeof(struct nlattr));
size += num_vfs *
(nla_total_size(sizeof(struct ifla_vf_mac)) +
nla_total_size(sizeof(struct ifla_vf_vlan)) +
nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)));
return size;
} else
return 0;
}
static size_t rtnl_port_size(const struct net_device *dev)
{
size_t port_size = nla_total_size(4) /* PORT_VF */
+ nla_total_size(PORT_PROFILE_MAX) /* PORT_PROFILE */
+ nla_total_size(sizeof(struct ifla_port_vsi))
/* PORT_VSI_TYPE */
+ nla_total_size(PORT_UUID_MAX) /* PORT_INSTANCE_UUID */
+ nla_total_size(PORT_UUID_MAX) /* PORT_HOST_UUID */
+ nla_total_size(1) /* PORT_VDP_REQUEST */
+ nla_total_size(2); /* PORT_VDP_RESPONSE */
size_t vf_ports_size = nla_total_size(sizeof(struct nlattr));
size_t vf_port_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
size_t port_self_size = nla_total_size(sizeof(struct nlattr))
+ port_size;
if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
return 0;
if (dev_num_vf(dev->dev.parent))
return port_self_size + vf_ports_size +
vf_port_size * dev_num_vf(dev->dev.parent);
else
return port_self_size;
}
static noinline size_t if_nlmsg_size(const struct net_device *dev,
u32 ext_filter_mask)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ nla_total_size(sizeof(struct rtnl_link_ifmap))
+ nla_total_size(sizeof(struct rtnl_link_stats))
+ nla_total_size(sizeof(struct rtnl_link_stats64))
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ nla_total_size(4) /* IFLA_TXQLEN */
+ nla_total_size(4) /* IFLA_WEIGHT */
+ nla_total_size(4) /* IFLA_MTU */
+ nla_total_size(4) /* IFLA_LINK */
+ nla_total_size(4) /* IFLA_MASTER */
+ nla_total_size(1) /* IFLA_CARRIER */
+ nla_total_size(4) /* IFLA_PROMISCUITY */
+ nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+ nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
+ nla_total_size(1) /* IFLA_OPERSTATE */
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+ rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
{
struct nlattr *vf_ports;
struct nlattr *vf_port;
int vf;
int err;
vf_ports = nla_nest_start(skb, IFLA_VF_PORTS);
if (!vf_ports)
return -EMSGSIZE;
for (vf = 0; vf < dev_num_vf(dev->dev.parent); vf++) {
vf_port = nla_nest_start(skb, IFLA_VF_PORT);
if (!vf_port)
goto nla_put_failure;
if (nla_put_u32(skb, IFLA_PORT_VF, vf))
goto nla_put_failure;
err = dev->netdev_ops->ndo_get_vf_port(dev, vf, skb);
if (err == -EMSGSIZE)
goto nla_put_failure;
if (err) {
nla_nest_cancel(skb, vf_port);
continue;
}
nla_nest_end(skb, vf_port);
}
nla_nest_end(skb, vf_ports);
return 0;
nla_put_failure:
nla_nest_cancel(skb, vf_ports);
return -EMSGSIZE;
}
static int rtnl_port_self_fill(struct sk_buff *skb, struct net_device *dev)
{
struct nlattr *port_self;
int err;
port_self = nla_nest_start(skb, IFLA_PORT_SELF);
if (!port_self)
return -EMSGSIZE;
err = dev->netdev_ops->ndo_get_vf_port(dev, PORT_SELF_VF, skb);
if (err) {
nla_nest_cancel(skb, port_self);
return (err == -EMSGSIZE) ? err : 0;
}
nla_nest_end(skb, port_self);
return 0;
}
static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
{
int err;
if (!dev->netdev_ops->ndo_get_vf_port || !dev->dev.parent)
return 0;
err = rtnl_port_self_fill(skb, dev);
if (err)
return err;
if (dev_num_vf(dev->dev.parent)) {
err = rtnl_vf_ports_fill(skb, dev);
if (err)
return err;
}
return 0;
}
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
{
struct ifinfomsg *ifm;
struct nlmsghdr *nlh;
struct rtnl_link_stats64 temp;
const struct rtnl_link_stats64 *stats;
struct nlattr *attr, *af_spec;
struct rtnl_af_ops *af_ops;
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
ASSERT_RTNL();
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
if (nlh == NULL)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_UNSPEC;
ifm->__ifi_pad = 0;
ifm->ifi_type = dev->type;
ifm->ifi_index = dev->ifindex;
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = change;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_TXQLEN, dev->tx_queue_len) ||
nla_put_u8(skb, IFLA_OPERSTATE,
netif_running(dev) ? dev->operstate : IF_OPER_DOWN) ||
nla_put_u8(skb, IFLA_LINKMODE, dev->link_mode) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u32(skb, IFLA_GROUP, dev->group) ||
nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
#ifdef CONFIG_RPS
nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
#endif
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
(upper_dev &&
nla_put_u32(skb, IFLA_MASTER, upper_dev->ifindex)) ||
nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) ||
(dev->qdisc &&
nla_put_string(skb, IFLA_QDISC, dev->qdisc->ops->id)) ||
(dev->ifalias &&
nla_put_string(skb, IFLA_IFALIAS, dev->ifalias)))
goto nla_put_failure;
if (1) {
struct rtnl_link_ifmap map = {
.mem_start = dev->mem_start,
.mem_end = dev->mem_end,
.base_addr = dev->base_addr,
.irq = dev->irq,
.dma = dev->dma,
.port = dev->if_port,
};
if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
goto nla_put_failure;
}
if (dev->addr_len) {
if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
nla_put(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast))
goto nla_put_failure;
}
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
goto nla_put_failure;
stats = dev_get_stats(dev, &temp);
copy_rtnl_link_stats(nla_data(attr), stats);
attr = nla_reserve(skb, IFLA_STATS64,
sizeof(struct rtnl_link_stats64));
if (attr == NULL)
goto nla_put_failure;
copy_rtnl_link_stats64(nla_data(attr), stats);
if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
goto nla_put_failure;
if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
&& (ext_filter_mask & RTEXT_FILTER_VF)) {
int i;
struct nlattr *vfinfo, *vf;
int num_vfs = dev_num_vf(dev->dev.parent);
vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
if (!vfinfo)
goto nla_put_failure;
for (i = 0; i < num_vfs; i++) {
struct ifla_vf_info ivi;
struct ifla_vf_mac vf_mac;
struct ifla_vf_vlan vf_vlan;
struct ifla_vf_tx_rate vf_tx_rate;
struct ifla_vf_spoofchk vf_spoofchk;
/*
* Not all SR-IOV capable drivers support the
* spoofcheck query. Preset to -1 so the user
* space tool can detect that the driver didn't
* report anything.
*/
ivi.spoofchk = -1;
if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
break;
vf_mac.vf =
vf_vlan.vf =
vf_tx_rate.vf =
vf_spoofchk.vf = ivi.vf;
memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
vf_vlan.vlan = ivi.vlan;
vf_vlan.qos = ivi.qos;
vf_tx_rate.rate = ivi.tx_rate;
vf_spoofchk.setting = ivi.spoofchk;
vf = nla_nest_start(skb, IFLA_VF_INFO);
if (!vf) {
nla_nest_cancel(skb, vfinfo);
goto nla_put_failure;
}
if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
&vf_tx_rate) ||
nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
&vf_spoofchk))
goto nla_put_failure;
nla_nest_end(skb, vf);
}
nla_nest_end(skb, vfinfo);
}
if (rtnl_port_fill(skb, dev))
goto nla_put_failure;
if (dev->rtnl_link_ops) {
if (rtnl_link_fill(skb, dev) < 0)
goto nla_put_failure;
}
if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
goto nla_put_failure;
list_for_each_entry(af_ops, &rtnl_af_ops, list) {
if (af_ops->fill_link_af) {
struct nlattr *af;
int err;
if (!(af = nla_nest_start(skb, af_ops->family)))
goto nla_put_failure;
err = af_ops->fill_link_af(skb, dev);
/*
* Caller may return ENODATA to indicate that there
* was no data to be dumped. This is not an error; it
* means we should trim the attribute header and
* continue.
*/
if (err == -ENODATA)
nla_nest_cancel(skb, af);
else if (err < 0)
goto nla_put_failure;
nla_nest_end(skb, af);
}
}
nla_nest_end(skb, af_spec);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
int h, s_h;
int idx = 0, s_idx;
struct net_device *dev;
struct hlist_head *head;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
s_h = cb->args[0];
s_idx = cb->args[1];
rcu_read_lock();
cb->seq = net->dev_base_seq;
if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}
for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
idx = 0;
head = &net->dev_index_head[h];
hlist_for_each_entry_rcu(dev, head, index_hlist) {
if (idx < s_idx)
goto cont;
if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, 0,
NLM_F_MULTI,
ext_filter_mask) <= 0)
goto out;
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
idx++;
}
}
out:
rcu_read_unlock();
cb->args[1] = idx;
cb->args[0] = h;
return skb->len;
}
const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
[IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
[IFLA_MTU] = { .type = NLA_U32 },
[IFLA_LINK] = { .type = NLA_U32 },
[IFLA_MASTER] = { .type = NLA_U32 },
[IFLA_CARRIER] = { .type = NLA_U8 },
[IFLA_TXQLEN] = { .type = NLA_U32 },
[IFLA_WEIGHT] = { .type = NLA_U32 },
[IFLA_OPERSTATE] = { .type = NLA_U8 },
[IFLA_LINKMODE] = { .type = NLA_U8 },
[IFLA_LINKINFO] = { .type = NLA_NESTED },
[IFLA_NET_NS_PID] = { .type = NLA_U32 },
[IFLA_NET_NS_FD] = { .type = NLA_U32 },
[IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
[IFLA_VFINFO_LIST] = { .type = NLA_NESTED },
[IFLA_VF_PORTS] = { .type = NLA_NESTED },
[IFLA_PORT_SELF] = { .type = NLA_NESTED },
[IFLA_AF_SPEC] = { .type = NLA_NESTED },
[IFLA_EXT_MASK] = { .type = NLA_U32 },
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
};
EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
[IFLA_INFO_DATA] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vfinfo_policy[IFLA_VF_INFO_MAX+1] = {
[IFLA_VF_INFO] = { .type = NLA_NESTED },
};
static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = {
[IFLA_VF_MAC] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_mac) },
[IFLA_VF_VLAN] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_vlan) },
[IFLA_VF_TX_RATE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_tx_rate) },
[IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_vf_spoofchk) },
};
static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
[IFLA_PORT_VF] = { .type = NLA_U32 },
[IFLA_PORT_PROFILE] = { .type = NLA_STRING,
.len = PORT_PROFILE_MAX },
[IFLA_PORT_VSI_TYPE] = { .type = NLA_BINARY,
.len = sizeof(struct ifla_port_vsi)},
[IFLA_PORT_INSTANCE_UUID] = { .type = NLA_BINARY,
.len = PORT_UUID_MAX },
[IFLA_PORT_HOST_UUID] = { .type = NLA_STRING,
.len = PORT_UUID_MAX },
[IFLA_PORT_REQUEST] = { .type = NLA_U8, },
[IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
};
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
struct net *net;
/* Examine the link attributes and figure out which
* network namespace we are talking about.
*/
if (tb[IFLA_NET_NS_PID])
net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
else if (tb[IFLA_NET_NS_FD])
net = get_net_ns_by_fd(nla_get_u32(tb[IFLA_NET_NS_FD]));
else
net = get_net(src_net);
return net;
}
EXPORT_SYMBOL(rtnl_link_get_net);
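/*
 * Callers get back either a struct net with an elevated refcount or an
 * ERR_PTR() from the namespace lookup, so the usual pattern is:
 *
 *	net = rtnl_link_get_net(dev_net(dev), tb);
 *	if (IS_ERR(net))
 *		return PTR_ERR(net);
 *	...
 *	put_net(net);
 *
 * as do_setlink() and rtnl_newlink() below demonstrate.
 */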
static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
{
if (dev) {
if (tb[IFLA_ADDRESS] &&
nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
return -EINVAL;
if (tb[IFLA_BROADCAST] &&
nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
return -EINVAL;
}
if (tb[IFLA_AF_SPEC]) {
struct nlattr *af;
int rem, err;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
if (!(af_ops = rtnl_af_lookup(nla_type(af))))
return -EAFNOSUPPORT;
if (!af_ops->set_link_af)
return -EOPNOTSUPP;
if (af_ops->validate_link_af) {
err = af_ops->validate_link_af(dev, af);
if (err < 0)
return err;
}
}
}
return 0;
}
static int do_setvfinfo(struct net_device *dev, struct nlattr *attr)
{
int rem, err = -EINVAL;
struct nlattr *vf;
const struct net_device_ops *ops = dev->netdev_ops;
nla_for_each_nested(vf, attr, rem) {
switch (nla_type(vf)) {
case IFLA_VF_MAC: {
struct ifla_vf_mac *ivm;
ivm = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_mac)
err = ops->ndo_set_vf_mac(dev, ivm->vf,
ivm->mac);
break;
}
case IFLA_VF_VLAN: {
struct ifla_vf_vlan *ivv;
ivv = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_vlan)
err = ops->ndo_set_vf_vlan(dev, ivv->vf,
ivv->vlan,
ivv->qos);
break;
}
case IFLA_VF_TX_RATE: {
struct ifla_vf_tx_rate *ivt;
ivt = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_tx_rate)
err = ops->ndo_set_vf_tx_rate(dev, ivt->vf,
ivt->rate);
break;
}
case IFLA_VF_SPOOFCHK: {
struct ifla_vf_spoofchk *ivs;
ivs = nla_data(vf);
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_spoofchk)
err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
ivs->setting);
break;
}
default:
err = -EINVAL;
break;
}
if (err)
break;
}
return err;
}
static int do_set_master(struct net_device *dev, int ifindex)
{
struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops;
int err;
if (upper_dev) {
if (upper_dev->ifindex == ifindex)
return 0;
ops = upper_dev->netdev_ops;
if (ops->ndo_del_slave) {
err = ops->ndo_del_slave(upper_dev, dev);
if (err)
return err;
} else {
return -EOPNOTSUPP;
}
}
if (ifindex) {
upper_dev = __dev_get_by_index(dev_net(dev), ifindex);
if (!upper_dev)
return -EINVAL;
ops = upper_dev->netdev_ops;
if (ops->ndo_add_slave) {
err = ops->ndo_add_slave(upper_dev, dev);
if (err)
return err;
} else {
return -EOPNOTSUPP;
}
}
return 0;
}
static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
struct nlattr **tb, char *ifname, int modified)
{
const struct net_device_ops *ops = dev->netdev_ops;
int err;
if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
struct net *net = rtnl_link_get_net(dev_net(dev), tb);
if (IS_ERR(net)) {
err = PTR_ERR(net);
goto errout;
}
if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
err = -EPERM;
goto errout;
}
err = dev_change_net_namespace(dev, net, ifname);
put_net(net);
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_MAP]) {
struct rtnl_link_ifmap *u_map;
struct ifmap k_map;
if (!ops->ndo_set_config) {
err = -EOPNOTSUPP;
goto errout;
}
if (!netif_device_present(dev)) {
err = -ENODEV;
goto errout;
}
u_map = nla_data(tb[IFLA_MAP]);
k_map.mem_start = (unsigned long) u_map->mem_start;
k_map.mem_end = (unsigned long) u_map->mem_end;
k_map.base_addr = (unsigned short) u_map->base_addr;
k_map.irq = (unsigned char) u_map->irq;
k_map.dma = (unsigned char) u_map->dma;
k_map.port = (unsigned char) u_map->port;
err = ops->ndo_set_config(dev, &k_map);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_ADDRESS]) {
struct sockaddr *sa;
int len;
len = sizeof(sa_family_t) + dev->addr_len;
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;
goto errout;
}
sa->sa_family = dev->type;
memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
dev->addr_len);
err = dev_set_mac_address(dev, sa);
kfree(sa);
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_MTU]) {
err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_GROUP]) {
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
modified = 1;
}
/*
* An interface selected by index with a name also provided
* implies that a name change has been requested.
*/
if (ifm->ifi_index > 0 && ifname[0]) {
err = dev_change_name(dev, ifname);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_IFALIAS]) {
err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
nla_len(tb[IFLA_IFALIAS]));
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_BROADCAST]) {
nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
}
if (ifm->ifi_flags || ifm->ifi_change) {
err = dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
if (err < 0)
goto errout;
}
if (tb[IFLA_MASTER]) {
err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_CARRIER]) {
err = dev_change_carrier(dev, nla_get_u8(tb[IFLA_CARRIER]));
if (err)
goto errout;
modified = 1;
}
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE]) {
write_lock_bh(&dev_base_lock);
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
write_unlock_bh(&dev_base_lock);
}
if (tb[IFLA_VFINFO_LIST]) {
struct nlattr *attr;
int rem;
nla_for_each_nested(attr, tb[IFLA_VFINFO_LIST], rem) {
if (nla_type(attr) != IFLA_VF_INFO) {
err = -EINVAL;
goto errout;
}
err = do_setvfinfo(dev, attr);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
if (tb[IFLA_VF_PORTS]) {
struct nlattr *port[IFLA_PORT_MAX+1];
struct nlattr *attr;
int vf;
int rem;
err = -EOPNOTSUPP;
if (!ops->ndo_set_vf_port)
goto errout;
nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
if (nla_type(attr) != IFLA_VF_PORT)
continue;
err = nla_parse_nested(port, IFLA_PORT_MAX,
attr, ifla_port_policy);
if (err < 0)
goto errout;
if (!port[IFLA_PORT_VF]) {
err = -EOPNOTSUPP;
goto errout;
}
vf = nla_get_u32(port[IFLA_PORT_VF]);
err = ops->ndo_set_vf_port(dev, vf, port);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
if (tb[IFLA_PORT_SELF]) {
struct nlattr *port[IFLA_PORT_MAX+1];
err = nla_parse_nested(port, IFLA_PORT_MAX,
tb[IFLA_PORT_SELF], ifla_port_policy);
if (err < 0)
goto errout;
err = -EOPNOTSUPP;
if (ops->ndo_set_vf_port)
err = ops->ndo_set_vf_port(dev, PORT_SELF_VF, port);
if (err < 0)
goto errout;
modified = 1;
}
if (tb[IFLA_AF_SPEC]) {
struct nlattr *af;
int rem;
nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
const struct rtnl_af_ops *af_ops;
if (!(af_ops = rtnl_af_lookup(nla_type(af))))
BUG();
err = af_ops->set_link_af(dev, af);
if (err < 0)
goto errout;
modified = 1;
}
}
err = 0;
errout:
if (err < 0 && modified)
net_warn_ratelimited("A link change request failed with some changes committed already. Interface %s may have been left with an inconsistent configuration, please check.\n",
dev->name);
return err;
}
static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
int err;
struct nlattr *tb[IFLA_MAX+1];
char ifname[IFNAMSIZ];
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
goto errout;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
err = -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
goto errout;
if (dev == NULL) {
err = -ENODEV;
goto errout;
}
err = validate_linkmsg(dev, tb);
if (err < 0)
goto errout;
err = do_setlink(dev, ifm, tb, ifname, 0);
errout:
return err;
}
static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
int err;
LIST_HEAD(list_kill);
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
return -EINVAL;
if (!dev)
return -ENODEV;
ops = dev->rtnl_link_ops;
if (!ops)
return -EOPNOTSUPP;
ops->dellink(dev, &list_kill);
unregister_netdevice_many(&list_kill);
list_del(&list_kill);
return 0;
}
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
{
unsigned int old_flags;
int err;
old_flags = dev->flags;
if (ifm && (ifm->ifi_flags || ifm->ifi_change)) {
err = __dev_change_flags(dev, rtnl_dev_combine_flags(dev, ifm));
if (err < 0)
return err;
}
dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
__dev_notify_flags(dev, old_flags);
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
struct net_device *rtnl_create_link(struct net *net,
char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
{
int err;
struct net_device *dev;
unsigned int num_tx_queues = 1;
unsigned int num_rx_queues = 1;
if (tb[IFLA_NUM_TX_QUEUES])
num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
else if (ops->get_num_tx_queues)
num_tx_queues = ops->get_num_tx_queues();
if (tb[IFLA_NUM_RX_QUEUES])
num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
err = -ENOMEM;
dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
num_tx_queues, num_rx_queues);
if (!dev)
goto err;
dev_net_set(dev, net);
dev->rtnl_link_ops = ops;
dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
if (tb[IFLA_MTU])
dev->mtu = nla_get_u32(tb[IFLA_MTU]);
if (tb[IFLA_ADDRESS]) {
memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
nla_len(tb[IFLA_ADDRESS]));
dev->addr_assign_type = NET_ADDR_SET;
}
if (tb[IFLA_BROADCAST])
memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
nla_len(tb[IFLA_BROADCAST]));
if (tb[IFLA_TXQLEN])
dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
if (tb[IFLA_OPERSTATE])
set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
if (tb[IFLA_LINKMODE])
dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
if (tb[IFLA_GROUP])
dev_set_group(dev, nla_get_u32(tb[IFLA_GROUP]));
return dev;
err:
return ERR_PTR(err);
}
EXPORT_SYMBOL(rtnl_create_link);
static int rtnl_group_changelink(struct net *net, int group,
struct ifinfomsg *ifm,
struct nlattr **tb)
{
struct net_device *dev;
int err;
for_each_netdev(net, dev) {
if (dev->group == group) {
err = do_setlink(dev, ifm, tb, NULL, 0);
if (err < 0)
return err;
}
}
return 0;
}
static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
const struct rtnl_link_ops *ops;
struct net_device *dev;
struct ifinfomsg *ifm;
char kind[MODULE_NAME_LEN];
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct nlattr *linkinfo[IFLA_INFO_MAX+1];
int err;
#ifdef CONFIG_MODULES
replay:
#endif
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
else
ifname[0] = '\0';
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else {
if (ifname[0])
dev = __dev_get_by_name(net, ifname);
else
dev = NULL;
}
err = validate_linkmsg(dev, tb);
if (err < 0)
return err;
if (tb[IFLA_LINKINFO]) {
err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
tb[IFLA_LINKINFO], ifla_info_policy);
if (err < 0)
return err;
} else
memset(linkinfo, 0, sizeof(linkinfo));
if (linkinfo[IFLA_INFO_KIND]) {
nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
ops = rtnl_link_ops_get(kind);
} else {
kind[0] = '\0';
ops = NULL;
}
if (1) {
struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
struct net *dest_net;
if (ops) {
if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
err = nla_parse_nested(attr, ops->maxtype,
linkinfo[IFLA_INFO_DATA],
ops->policy);
if (err < 0)
return err;
data = attr;
}
if (ops->validate) {
err = ops->validate(tb, data);
if (err < 0)
return err;
}
}
if (dev) {
int modified = 0;
if (nlh->nlmsg_flags & NLM_F_EXCL)
return -EEXIST;
if (nlh->nlmsg_flags & NLM_F_REPLACE)
return -EOPNOTSUPP;
if (linkinfo[IFLA_INFO_DATA]) {
if (!ops || ops != dev->rtnl_link_ops ||
!ops->changelink)
return -EOPNOTSUPP;
err = ops->changelink(dev, tb, data);
if (err < 0)
return err;
modified = 1;
}
return do_setlink(dev, ifm, tb, ifname, modified);
}
if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
return rtnl_group_changelink(net,
nla_get_u32(tb[IFLA_GROUP]),
ifm, tb);
return -ENODEV;
}
if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
return -EOPNOTSUPP;
if (!ops) {
#ifdef CONFIG_MODULES
if (kind[0]) {
__rtnl_unlock();
request_module("rtnl-link-%s", kind);
rtnl_lock();
ops = rtnl_link_ops_get(kind);
if (ops)
goto replay;
}
#endif
return -EOPNOTSUPP;
}
if (!ifname[0])
snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
dest_net = rtnl_link_get_net(net, tb);
if (IS_ERR(dest_net))
return PTR_ERR(dest_net);
dev = rtnl_create_link(dest_net, ifname, ops, tb);
if (IS_ERR(dev)) {
err = PTR_ERR(dev);
goto out;
}
dev->ifindex = ifm->ifi_index;
if (ops->newlink)
err = ops->newlink(net, dev, tb, data);
else
err = register_netdevice(dev);
if (err < 0 && !IS_ERR(dev))
free_netdev(dev);
if (err < 0)
goto out;
err = rtnl_configure_link(dev, ifm);
if (err < 0)
unregister_netdevice(dev);
out:
put_net(dest_net);
return err;
}
}
static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
char ifname[IFNAMSIZ];
struct nlattr *tb[IFLA_MAX+1];
struct net_device *dev = NULL;
struct sk_buff *nskb;
int err;
u32 ext_filter_mask = 0;
err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
if (err < 0)
return err;
if (tb[IFLA_IFNAME])
nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
ifm = nlmsg_data(nlh);
if (ifm->ifi_index > 0)
dev = __dev_get_by_index(net, ifm->ifi_index);
else if (tb[IFLA_IFNAME])
dev = __dev_get_by_name(net, ifname);
else
return -EINVAL;
if (dev == NULL)
return -ENODEV;
nskb = nlmsg_new(if_nlmsg_size(dev, ext_filter_mask), GFP_KERNEL);
if (nskb == NULL)
return -ENOBUFS;
err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).portid,
nlh->nlmsg_seq, 0, 0, ext_filter_mask);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size */
WARN_ON(err == -EMSGSIZE);
kfree_skb(nskb);
} else
err = rtnl_unicast(nskb, net, NETLINK_CB(skb).portid);
return err;
}
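/*
 * Editor's note: illustrative sketch, not part of the original file. This
 * is roughly what a minimal userspace counterpart of rtnl_getlink() might
 * look like: one RTM_GETLINK request for a single ifindex, sent over an
 * already-open NETLINK_ROUTE socket. The getlink_request() name is
 * hypothetical.
 */
#if 0
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <string.h>
#include <sys/socket.h>

static int getlink_request(int fd, int ifindex)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifm;
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifm));
	req.nlh.nlmsg_type = RTM_GETLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.nlh.nlmsg_seq = 1;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = ifindex;

	/* the kernel answers with one RTM_NEWLINK message (or an error) */
	return send(fd, &req, req.nlh.nlmsg_len, 0) < 0 ? -1 : 0;
}
#endif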
static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
struct nlattr *tb[IFLA_MAX+1];
u32 ext_filter_mask = 0;
u16 min_ifinfo_dump_size = 0;
if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
ifla_policy) >= 0) {
if (tb[IFLA_EXT_MASK])
ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
}
if (!ext_filter_mask)
return NLMSG_GOODSIZE;
/*
* traverse the list of net devices and compute the minimum
* buffer size based upon the filter mask.
*/
list_for_each_entry(dev, &net->dev_base_head, dev_list) {
min_ifinfo_dump_size = max_t(u16, min_ifinfo_dump_size,
if_nlmsg_size(dev,
ext_filter_mask));
}
return min_ifinfo_dump_size;
}
static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx;
int s_idx = cb->family;
if (s_idx == 0)
s_idx = 1;
for (idx = 1; idx <= RTNL_FAMILY_MAX; idx++) {
int type = cb->nlh->nlmsg_type-RTM_BASE;
if (idx < s_idx || idx == PF_PACKET)
continue;
if (rtnl_msg_handlers[idx] == NULL ||
rtnl_msg_handlers[idx][type].dumpit == NULL)
continue;
if (idx > s_idx)
memset(&cb->args[0], 0, sizeof(cb->args));
if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
break;
}
cb->family = idx;
return skb->len;
}
void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
size_t if_info_size;
skb = nlmsg_new((if_info_size = if_nlmsg_size(dev, 0)), GFP_KERNEL);
if (skb == NULL)
goto errout;
err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in if_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}
EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
u8 *addr, u32 pid, u32 seq,
int type, unsigned int flags)
{
struct nlmsghdr *nlh;
struct ndmsg *ndm;
nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
if (!nlh)
return -EMSGSIZE;
ndm = nlmsg_data(nlh);
ndm->ndm_family = AF_BRIDGE;
ndm->ndm_pad1 = 0;
ndm->ndm_pad2 = 0;
ndm->ndm_flags = flags;
ndm->ndm_type = 0;
ndm->ndm_ifindex = dev->ifindex;
ndm->ndm_state = NUD_PERMANENT;
if (nla_put(skb, NDA_LLADDR, ETH_ALEN, addr))
goto nla_put_failure;
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
static inline size_t rtnl_fdb_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
}
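/*
 * Editor's note (worked example, assuming the usual 4-byte netlink
 * alignment): sizeof(struct ndmsg) is 12 and already aligned, and the
 * NDA_LLADDR attribute costs NLA_ALIGN(NLA_HDRLEN + ETH_ALEN) =
 * NLA_ALIGN(4 + 6) = 12, so rtnl_fdb_nlmsg_size() returns 24 bytes.
 */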
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
{
struct net *net = dev_net(dev);
struct sk_buff *skb;
int err = -ENOBUFS;
skb = nlmsg_new(rtnl_fdb_nlmsg_size(), GFP_ATOMIC);
if (!skb)
goto errout;
err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
if (err < 0) {
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
return;
errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
static int rtnl_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
u8 *addr;
int err;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
if (!is_valid_ether_addr(addr)) {
pr_info("PF_BRIDGE: RTM_NEWNEIGH with invalid ether address\n");
return -EINVAL;
}
err = -EOPNOTSUPP;
/* Support fdb on the master device (the net/bridge default case) */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
err = ops->ndo_fdb_add(ndm, tb, dev, addr, nlh->nlmsg_flags);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_add) {
err = dev->netdev_ops->ndo_fdb_add(ndm, tb,
dev, addr,
nlh->nlmsg_flags);
if (!err) {
rtnl_fdb_notify(dev, addr, RTM_NEWNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
}
static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
struct net *net = sock_net(skb->sk);
struct ndmsg *ndm;
struct nlattr *tb[NDA_MAX+1];
struct net_device *dev;
int err = -EINVAL;
__u8 *addr;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
if (err < 0)
return err;
ndm = nlmsg_data(nlh);
if (ndm->ndm_ifindex == 0) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ifindex\n");
return -EINVAL;
}
dev = __dev_get_by_index(net, ndm->ndm_ifindex);
if (dev == NULL) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with unknown ifindex\n");
return -ENODEV;
}
if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid address\n");
return -EINVAL;
}
addr = nla_data(tb[NDA_LLADDR]);
if (!is_valid_ether_addr(addr)) {
pr_info("PF_BRIDGE: RTM_DELNEIGH with invalid ether address\n");
return -EINVAL;
}
err = -EOPNOTSUPP;
/* Support fdb on the master device (the net/bridge default case) */
if ((!ndm->ndm_flags || ndm->ndm_flags & NTF_MASTER) &&
(dev->priv_flags & IFF_BRIDGE_PORT)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
const struct net_device_ops *ops = br_dev->netdev_ops;
if (ops->ndo_fdb_del)
err = ops->ndo_fdb_del(ndm, tb, dev, addr);
if (err)
goto out;
else
ndm->ndm_flags &= ~NTF_MASTER;
}
/* Embedded bridge, macvlan, and any other device support */
if ((ndm->ndm_flags & NTF_SELF) && dev->netdev_ops->ndo_fdb_del) {
err = dev->netdev_ops->ndo_fdb_del(ndm, tb, dev, addr);
if (!err) {
rtnl_fdb_notify(dev, addr, RTM_DELNEIGH);
ndm->ndm_flags &= ~NTF_SELF;
}
}
out:
return err;
}
static int nlmsg_populate_fdb(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
int *idx,
struct netdev_hw_addr_list *list)
{
struct netdev_hw_addr *ha;
int err;
u32 portid, seq;
portid = NETLINK_CB(cb->skb).portid;
seq = cb->nlh->nlmsg_seq;
list_for_each_entry(ha, &list->list, list) {
if (*idx < cb->args[0])
goto skip;
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
portid, seq,
RTM_NEWNEIGH, NTF_SELF);
if (err < 0)
return err;
skip:
*idx += 1;
}
return 0;
}
/**
 * ndo_dflt_fdb_dump - default netdevice operation to dump an FDB table.
 * @skb: socket buffer the FDB entries are filled into
 * @cb: netlink callback carrying the dump position
 * @dev: netdevice whose address lists are dumped
 * @idx: index of the first entry to dump
 *
 * Default netdevice operation to dump the existing unicast and multicast
 * address lists. Returns the index one past the last entry dumped.
 */
int ndo_dflt_fdb_dump(struct sk_buff *skb,
struct netlink_callback *cb,
struct net_device *dev,
int idx)
{
int err;
netif_addr_lock_bh(dev);
err = nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->uc);
if (err)
goto out;
nlmsg_populate_fdb(skb, cb, dev, &idx, &dev->mc);
out:
netif_addr_unlock_bh(dev);
return idx;
}
EXPORT_SYMBOL(ndo_dflt_fdb_dump);
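/*
 * Editor's note: illustrative sketch, not part of the original file. A
 * driver with no private FDB state can simply forward its ndo_fdb_dump
 * callback to the default helper exported above; the foo_fdb_dump() name
 * is hypothetical.
 */
#if 0
static int foo_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			struct net_device *dev, int idx)
{
	return ndo_dflt_fdb_dump(skb, cb, dev, idx);
}
#endif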
static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
int idx = 0;
struct net *net = sock_net(skb->sk);
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
if (dev->priv_flags & IFF_BRIDGE_PORT) {
struct net_device *br_dev;
const struct net_device_ops *ops;
br_dev = netdev_master_upper_dev_get(dev);
ops = br_dev->netdev_ops;
if (ops->ndo_fdb_dump)
idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
}
if (dev->netdev_ops->ndo_fdb_dump)
idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
}
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode)
{
struct nlmsghdr *nlh;
struct ifinfomsg *ifm;
struct nlattr *br_afspec;
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
if (nlh == NULL)
return -EMSGSIZE;
ifm = nlmsg_data(nlh);
ifm->ifi_family = AF_BRIDGE;
ifm->__ifi_pad = 0;
ifm->ifi_type = dev->type;
ifm->ifi_index = dev->ifindex;
ifm->ifi_flags = dev_get_flags(dev);
ifm->ifi_change = 0;
if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
(br_dev &&
nla_put_u32(skb, IFLA_MASTER, br_dev->ifindex)) ||
(dev->addr_len &&
nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
(dev->ifindex != dev->iflink &&
nla_put_u32(skb, IFLA_LINK, dev->iflink)))
goto nla_put_failure;
br_afspec = nla_nest_start(skb, IFLA_AF_SPEC);
if (!br_afspec)
goto nla_put_failure;
if (nla_put_u16(skb, IFLA_BRIDGE_FLAGS, BRIDGE_FLAGS_SELF) ||
nla_put_u16(skb, IFLA_BRIDGE_MODE, mode)) {
nla_nest_cancel(skb, br_afspec);
goto nla_put_failure;
}
nla_nest_end(skb, br_afspec);
return nlmsg_end(skb, nlh);
nla_put_failure:
nlmsg_cancel(skb, nlh);
return -EMSGSIZE;
}
EXPORT_SYMBOL(ndo_dflt_bridge_getlink);
static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
struct net_device *dev;
int idx = 0;
u32 portid = NETLINK_CB(cb->skb).portid;
u32 seq = cb->nlh->nlmsg_seq;
struct nlattr *extfilt;
u32 filter_mask = 0;
extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
rcu_read_lock();
for_each_netdev_rcu(net, dev) {
const struct net_device_ops *ops = dev->netdev_ops;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev, filter_mask) < 0)
break;
idx++;
}
if (ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
ops->ndo_bridge_getlink(skb, portid, seq, dev,
filter_mask) < 0)
break;
idx++;
}
}
rcu_read_unlock();
cb->args[0] = idx;
return skb->len;
}
static inline size_t bridge_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ nla_total_size(sizeof(u32)) /* IFLA_MASTER */
+ nla_total_size(sizeof(u32)) /* IFLA_MTU */
+ nla_total_size(sizeof(u32)) /* IFLA_LINK */
+ nla_total_size(sizeof(u32)) /* IFLA_OPERSTATE */
+ nla_total_size(sizeof(u8)) /* IFLA_PROTINFO */
+ nla_total_size(sizeof(struct nlattr)) /* IFLA_AF_SPEC */
+ nla_total_size(sizeof(u16)) /* IFLA_BRIDGE_FLAGS */
+ nla_total_size(sizeof(u16)); /* IFLA_BRIDGE_MODE */
}
static int rtnl_bridge_notify(struct net_device *dev, u16 flags)
{
struct net *net = dev_net(dev);
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
struct sk_buff *skb;
int err = -EOPNOTSUPP;
skb = nlmsg_new(bridge_nlmsg_size(), GFP_ATOMIC);
if (!skb) {
err = -ENOMEM;
goto errout;
}
if ((!flags || (flags & BRIDGE_FLAGS_MASTER)) &&
br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
err = br_dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
if (err < 0)
goto errout;
}
if ((flags & BRIDGE_FLAGS_SELF) &&
dev->netdev_ops->ndo_bridge_getlink) {
err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
if (err < 0)
goto errout;
}
rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
return 0;
errout:
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
rtnl_set_sk_err(net, RTNLGRP_LINK, err);
return err;
}
static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
struct nlattr *br_spec, *attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 oflags, flags = 0;
bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_family != AF_BRIDGE)
return -EPFNOSUPPORT;
dev = __dev_get_by_index(net, ifm->ifi_index);
if (!dev) {
pr_info("PF_BRIDGE: RTM_SETLINK with unknown ifindex\n");
return -ENODEV;
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
have_flags = true;
flags = nla_get_u16(attr);
break;
}
}
}
oflags = flags;
if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
if (!br_dev || !br_dev->netdev_ops->ndo_bridge_setlink) {
err = -EOPNOTSUPP;
goto out;
}
err = br_dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
if (err)
goto out;
flags &= ~BRIDGE_FLAGS_MASTER;
}
if ((flags & BRIDGE_FLAGS_SELF)) {
if (!dev->netdev_ops->ndo_bridge_setlink)
err = -EOPNOTSUPP;
else
err = dev->netdev_ops->ndo_bridge_setlink(dev, nlh);
if (!err)
flags &= ~BRIDGE_FLAGS_SELF;
}
if (have_flags)
memcpy(nla_data(attr), &flags, sizeof(flags));
/* Generate event to notify upper layer of bridge change */
if (!err)
err = rtnl_bridge_notify(dev, oflags);
out:
return err;
}
static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh,
void *arg)
{
struct net *net = sock_net(skb->sk);
struct ifinfomsg *ifm;
struct net_device *dev;
struct nlattr *br_spec, *attr = NULL;
int rem, err = -EOPNOTSUPP;
u16 oflags, flags = 0;
bool have_flags = false;
if (nlmsg_len(nlh) < sizeof(*ifm))
return -EINVAL;
ifm = nlmsg_data(nlh);
if (ifm->ifi_family != AF_BRIDGE)
return -EPFNOSUPPORT;
dev = __dev_get_by_index(net, ifm->ifi_index);
if (!dev) {
pr_info("PF_BRIDGE: RTM_DELLINK with unknown ifindex\n");
return -ENODEV;
}
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (br_spec) {
nla_for_each_nested(attr, br_spec, rem) {
if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
have_flags = true;
flags = nla_get_u16(attr);
break;
}
}
}
oflags = flags;
if (!flags || (flags & BRIDGE_FLAGS_MASTER)) {
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
if (!br_dev || !br_dev->netdev_ops->ndo_bridge_dellink) {
err = -EOPNOTSUPP;
goto out;
}
err = br_dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
if (err)
goto out;
flags &= ~BRIDGE_FLAGS_MASTER;
}
if ((flags & BRIDGE_FLAGS_SELF)) {
if (!dev->netdev_ops->ndo_bridge_dellink)
err = -EOPNOTSUPP;
else
err = dev->netdev_ops->ndo_bridge_dellink(dev, nlh);
if (!err)
flags &= ~BRIDGE_FLAGS_SELF;
}
if (have_flags)
memcpy(nla_data(attr), &flags, sizeof(flags));
/* Generate event to notify upper layer of bridge change */
if (!err)
err = rtnl_bridge_notify(dev, oflags);
out:
return err;
}
/* Protected by the RTNL semaphore. */
static struct rtattr **rta_buf;
static int rtattr_max;
/* Process one rtnetlink message. */
static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
rtnl_doit_func doit;
int sz_idx, kind;
int min_len;
int family;
int type;
int err;
type = nlh->nlmsg_type;
if (type > RTM_MAX)
return -EOPNOTSUPP;
type -= RTM_BASE;
/* All the messages must have at least the length of an rtgenmsg */
if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
return 0;
family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
sz_idx = type>>2;
kind = type&3;
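/*
 * Editor's note: rtnetlink message types come in groups of four
 * (NEW, DEL, GET, SET), so the low two bits give the kind
 * (kind == 2 is a GET request) while sz_idx indexes the rtm_min[]
 * and rta_max[] tables.
 */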
if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
return -EPERM;
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
struct sock *rtnl;
rtnl_dumpit_func dumpit;
rtnl_calcit_func calcit;
u16 min_dump_alloc = 0;
dumpit = rtnl_get_dumpit(family, type);
if (dumpit == NULL)
return -EOPNOTSUPP;
calcit = rtnl_get_calcit(family, type);
if (calcit)
min_dump_alloc = calcit(skb, nlh);
__rtnl_unlock();
rtnl = net->rtnl;
{
struct netlink_dump_control c = {
.dump = dumpit,
.min_dump_alloc = min_dump_alloc,
};
err = netlink_dump_start(rtnl, skb, nlh, &c);
}
rtnl_lock();
return err;
}
memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
min_len = rtm_min[sz_idx];
if (nlh->nlmsg_len < min_len)
return -EINVAL;
if (nlh->nlmsg_len > min_len) {
int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);
while (RTA_OK(attr, attrlen)) {
unsigned int flavor = attr->rta_type;
if (flavor) {
if (flavor > rta_max[sz_idx])
return -EINVAL;
rta_buf[flavor-1] = attr;
}
attr = RTA_NEXT(attr, attrlen);
}
}
doit = rtnl_get_doit(family, type);
if (doit == NULL)
return -EOPNOTSUPP;
return doit(skb, nlh, (void *)&rta_buf[0]);
}
static void rtnetlink_rcv(struct sk_buff *skb)
{
rtnl_lock();
netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
rtnl_unlock();
}
static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = ptr;
switch (event) {
case NETDEV_UP:
case NETDEV_DOWN:
case NETDEV_PRE_UP:
case NETDEV_POST_INIT:
case NETDEV_REGISTER:
case NETDEV_CHANGE:
case NETDEV_PRE_TYPE_CHANGE:
case NETDEV_GOING_DOWN:
case NETDEV_UNREGISTER:
case NETDEV_UNREGISTER_FINAL:
case NETDEV_RELEASE:
case NETDEV_JOIN:
break;
default:
rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block rtnetlink_dev_notifier = {
.notifier_call = rtnetlink_event,
};
static int __net_init rtnetlink_net_init(struct net *net)
{
struct sock *sk;
struct netlink_kernel_cfg cfg = {
.groups = RTNLGRP_MAX,
.input = rtnetlink_rcv,
.cb_mutex = &rtnl_mutex,
.flags = NL_CFG_F_NONROOT_RECV,
};
sk = netlink_kernel_create(net, NETLINK_ROUTE, &cfg);
if (!sk)
return -ENOMEM;
net->rtnl = sk;
return 0;
}
static void __net_exit rtnetlink_net_exit(struct net *net)
{
netlink_kernel_release(net->rtnl);
net->rtnl = NULL;
}
static struct pernet_operations rtnetlink_net_ops = {
.init = rtnetlink_net_init,
.exit = rtnetlink_net_exit,
};
void __init rtnetlink_init(void)
{
int i;
rtattr_max = 0;
for (i = 0; i < ARRAY_SIZE(rta_max); i++)
if (rta_max[i] > rtattr_max)
rtattr_max = rta_max[i];
rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
if (!rta_buf)
panic("rtnetlink_init: cannot allocate rta_buf\n");
if (register_pernet_subsys(&rtnetlink_net_ops))
panic("rtnetlink_init: cannot initialize rtnetlink\n");
register_netdevice_notifier(&rtnetlink_dev_notifier);
rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink,
rtnl_dump_ifinfo, rtnl_calcit);
rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL, NULL);
rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all, NULL);
rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all, NULL);
rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, rtnl_fdb_add, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_DELNEIGH, rtnl_fdb_del, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, rtnl_fdb_dump, NULL);
rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, rtnl_bridge_getlink, NULL);
rtnl_register(PF_BRIDGE, RTM_DELLINK, rtnl_bridge_dellink, NULL, NULL);
rtnl_register(PF_BRIDGE, RTM_SETLINK, rtnl_bridge_setlink, NULL, NULL);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5671_0 |
crossvul-cpp_data_good_2285_0 | /*
* Copyright (c) 2000-2005 Silicon Graphics, Inc.
* Copyright (c) 2013 Red Hat, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_dir2_priv.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_cksum.h"
#include "xfs_buf_item.h"
/*
* xfs_da_btree.c
*
* Routines to implement directories as Btrees of hashed names.
*/
/*========================================================================
* Function prototypes for the kernel.
*========================================================================*/
/*
* Routines used for growing the Btree.
*/
STATIC int xfs_da3_root_split(xfs_da_state_t *state,
xfs_da_state_blk_t *existing_root,
xfs_da_state_blk_t *new_child);
STATIC int xfs_da3_node_split(xfs_da_state_t *state,
xfs_da_state_blk_t *existing_blk,
xfs_da_state_blk_t *split_blk,
xfs_da_state_blk_t *blk_to_add,
int treelevel,
int *result);
STATIC void xfs_da3_node_rebalance(xfs_da_state_t *state,
xfs_da_state_blk_t *node_blk_1,
xfs_da_state_blk_t *node_blk_2);
STATIC void xfs_da3_node_add(xfs_da_state_t *state,
xfs_da_state_blk_t *old_node_blk,
xfs_da_state_blk_t *new_node_blk);
/*
* Routines used for shrinking the Btree.
*/
STATIC int xfs_da3_root_join(xfs_da_state_t *state,
xfs_da_state_blk_t *root_blk);
STATIC int xfs_da3_node_toosmall(xfs_da_state_t *state, int *retval);
STATIC void xfs_da3_node_remove(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk);
STATIC void xfs_da3_node_unbalance(xfs_da_state_t *state,
xfs_da_state_blk_t *src_node_blk,
xfs_da_state_blk_t *dst_node_blk);
/*
* Utility routines.
*/
STATIC int xfs_da3_blk_unlink(xfs_da_state_t *state,
xfs_da_state_blk_t *drop_blk,
xfs_da_state_blk_t *save_blk);
kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */
/*
* Allocate a dir-state structure.
* We don't put them on the stack since they're large.
*/
xfs_da_state_t *
xfs_da_state_alloc(void)
{
return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS);
}
/*
* Kill the altpath contents of a da-state structure.
*/
STATIC void
xfs_da_state_kill_altpath(xfs_da_state_t *state)
{
int i;
for (i = 0; i < state->altpath.active; i++)
state->altpath.blk[i].bp = NULL;
state->altpath.active = 0;
}
/*
* Free a da-state structure.
*/
void
xfs_da_state_free(xfs_da_state_t *state)
{
xfs_da_state_kill_altpath(state);
#ifdef DEBUG
memset((char *)state, 0, sizeof(*state));
#endif /* DEBUG */
kmem_zone_free(xfs_da_state_zone, state);
}
static bool
xfs_da3_node_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_intnode *hdr = bp->b_addr;
struct xfs_da3_icnode_hdr ichdr;
const struct xfs_dir_ops *ops;
ops = xfs_dir_get_ops(mp, NULL);
ops->node_hdr_from_disk(&ichdr, hdr);
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (ichdr.magic != XFS_DA3_NODE_MAGIC)
return false;
if (!uuid_equal(&hdr3->info.uuid, &mp->m_sb.sb_uuid))
return false;
if (be64_to_cpu(hdr3->info.blkno) != bp->b_bn)
return false;
} else {
if (ichdr.magic != XFS_DA_NODE_MAGIC)
return false;
}
if (ichdr.level == 0)
return false;
if (ichdr.level > XFS_DA_NODE_MAXDEPTH)
return false;
if (ichdr.count == 0)
return false;
/*
 * we don't know if the node is for an attribute or a directory tree,
 * so only fail if the count is outside both bounds
 */
if (ichdr.count > mp->m_dir_node_ents &&
ichdr.count > mp->m_attr_node_ents)
return false;
/* XXX: hash order check? */
return true;
}
static void
xfs_da3_node_write_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_buf_log_item *bip = bp->b_fspriv;
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
if (!xfs_da3_node_verify(bp)) {
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
return;
}
if (!xfs_sb_version_hascrc(&mp->m_sb))
return;
if (bip)
hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length), XFS_DA3_NODE_CRC_OFF);
}
/*
* leaf/node format detection on trees is sketchy, so a node read can be done on
* leaf level blocks when detection identifies the tree as a node format tree
* incorrectly. In this case, we need to swap the verifier to match the correct
* format of the block being read.
*/
static void
xfs_da3_node_read_verify(
struct xfs_buf *bp)
{
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_da_blkinfo *info = bp->b_addr;
switch (be16_to_cpu(info->magic)) {
case XFS_DA3_NODE_MAGIC:
if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
XFS_DA3_NODE_CRC_OFF))
break;
/* fall through */
case XFS_DA_NODE_MAGIC:
if (!xfs_da3_node_verify(bp))
break;
return;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
bp->b_ops = &xfs_attr3_leaf_buf_ops;
bp->b_ops->verify_read(bp);
return;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
bp->b_ops = &xfs_dir3_leafn_buf_ops;
bp->b_ops->verify_read(bp);
return;
default:
break;
}
/* corrupt block */
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
xfs_buf_ioerror(bp, EFSCORRUPTED);
}
const struct xfs_buf_ops xfs_da3_node_buf_ops = {
.verify_read = xfs_da3_node_read_verify,
.verify_write = xfs_da3_node_write_verify,
};
int
xfs_da3_node_read(
struct xfs_trans *tp,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int which_fork)
{
int err;
err = xfs_da_read_buf(tp, dp, bno, mappedbno, bpp,
which_fork, &xfs_da3_node_buf_ops);
if (!err && tp) {
struct xfs_da_blkinfo *info = (*bpp)->b_addr;
int type;
switch (be16_to_cpu(info->magic)) {
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
type = XFS_BLFT_DA_NODE_BUF;
break;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
type = XFS_BLFT_ATTR_LEAF_BUF;
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
type = XFS_BLFT_DIR_LEAFN_BUF;
break;
default:
type = 0;
ASSERT(0);
break;
}
xfs_trans_buf_set_type(tp, *bpp, type);
}
return err;
}
/*========================================================================
* Routines used for growing the Btree.
*========================================================================*/
/*
* Create the initial contents of an intermediate node.
*/
int
xfs_da3_node_create(
struct xfs_da_args *args,
xfs_dablk_t blkno,
int level,
struct xfs_buf **bpp,
int whichfork)
{
struct xfs_da_intnode *node;
struct xfs_trans *tp = args->trans;
struct xfs_mount *mp = tp->t_mountp;
struct xfs_da3_icnode_hdr ichdr = {0};
struct xfs_buf *bp;
int error;
struct xfs_inode *dp = args->dp;
trace_xfs_da_node_create(args);
ASSERT(level <= XFS_DA_NODE_MAXDEPTH);
error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, whichfork);
if (error)
return(error);
bp->b_ops = &xfs_da3_node_buf_ops;
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
node = bp->b_addr;
if (xfs_sb_version_hascrc(&mp->m_sb)) {
struct xfs_da3_node_hdr *hdr3 = bp->b_addr;
ichdr.magic = XFS_DA3_NODE_MAGIC;
hdr3->info.blkno = cpu_to_be64(bp->b_bn);
hdr3->info.owner = cpu_to_be64(args->dp->i_ino);
uuid_copy(&hdr3->info.uuid, &mp->m_sb.sb_uuid);
} else {
ichdr.magic = XFS_DA_NODE_MAGIC;
}
ichdr.level = level;
dp->d_ops->node_hdr_to_disk(node, &ichdr);
xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
*bpp = bp;
return(0);
}
/*
* Split a leaf node, rebalance, then possibly split
* intermediate nodes, rebalance, etc.
*/
int /* error */
xfs_da3_split(
struct xfs_da_state *state)
{
struct xfs_da_state_blk *oldblk;
struct xfs_da_state_blk *newblk;
struct xfs_da_state_blk *addblk;
struct xfs_da_intnode *node;
struct xfs_buf *bp;
int max;
int action = 0;
int error;
int i;
trace_xfs_da_split(state->args);
/*
* Walk back up the tree splitting/inserting/adjusting as necessary.
* If we need to insert and there isn't room, split the node, then
* decide which fragment to insert the new block from below into.
* Note that we may split the root this way, but we need more fixup.
*/
max = state->path.active - 1;
ASSERT((max >= 0) && (max < XFS_DA_NODE_MAXDEPTH));
ASSERT(state->path.blk[max].magic == XFS_ATTR_LEAF_MAGIC ||
state->path.blk[max].magic == XFS_DIR2_LEAFN_MAGIC);
addblk = &state->path.blk[max]; /* initial dummy value */
for (i = max; (i >= 0) && addblk; state->path.active--, i--) {
oldblk = &state->path.blk[i];
newblk = &state->altpath.blk[i];
/*
* If a leaf node then
* Allocate a new leaf node, then rebalance across them.
* else if an intermediate node then
* We split on the last layer, must we split the node?
*/
switch (oldblk->magic) {
case XFS_ATTR_LEAF_MAGIC:
error = xfs_attr3_leaf_split(state, oldblk, newblk);
if ((error != 0) && (error != ENOSPC)) {
return(error); /* GROT: attr is inconsistent */
}
if (!error) {
addblk = newblk;
break;
}
/*
* Entry wouldn't fit, split the leaf again.
*/
state->extravalid = 1;
if (state->inleaf) {
state->extraafter = 0; /* before newblk */
trace_xfs_attr_leaf_split_before(state->args);
error = xfs_attr3_leaf_split(state, oldblk,
&state->extrablk);
} else {
state->extraafter = 1; /* after newblk */
trace_xfs_attr_leaf_split_after(state->args);
error = xfs_attr3_leaf_split(state, newblk,
&state->extrablk);
}
if (error)
return(error); /* GROT: attr inconsistent */
addblk = newblk;
break;
case XFS_DIR2_LEAFN_MAGIC:
error = xfs_dir2_leafn_split(state, oldblk, newblk);
if (error)
return error;
addblk = newblk;
break;
case XFS_DA_NODE_MAGIC:
error = xfs_da3_node_split(state, oldblk, newblk, addblk,
max - i, &action);
addblk->bp = NULL;
if (error)
return(error); /* GROT: dir is inconsistent */
/*
* Record the newly split block for the next time thru?
*/
if (action)
addblk = newblk;
else
addblk = NULL;
break;
}
/*
* Update the btree to show the new hashval for this child.
*/
xfs_da3_fixhashpath(state, &state->path);
}
if (!addblk)
return(0);
/*
* Split the root node.
*/
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
if (error) {
addblk->bp = NULL;
return(error); /* GROT: dir is inconsistent */
}
/*
* Update pointers to the node which used to be block 0 and
* just got bumped because of the addition of a new root node.
* There might be three blocks involved if a double split occurred,
* and the original block 0 could be at any position in the list.
*
* Note: the magic numbers and sibling pointers are in the same
* physical place for both v2 and v3 headers (by design). Hence it
* doesn't matter which version of the xfs_da_intnode structure we use
* here as the result will be the same using either structure.
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
if (be32_to_cpu(node->hdr.info.forw) == addblk->blkno) {
bp = addblk->bp;
} else {
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
node = bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
if (be32_to_cpu(node->hdr.info.back) == addblk->blkno) {
bp = addblk->bp;
} else {
ASSERT(state->extravalid);
bp = state->extrablk.bp;
}
node = bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
addblk->bp = NULL;
return(0);
}
/*
* Split the root. We have to create a new root and point to the two
* parts (the split old root) that we just created. Copy block zero to
* the EOF, extending the inode in process.
*/
STATIC int /* error */
xfs_da3_root_split(
struct xfs_da_state *state,
struct xfs_da_state_blk *blk1,
struct xfs_da_state_blk *blk2)
{
struct xfs_da_intnode *node;
struct xfs_da_intnode *oldroot;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_args *args;
struct xfs_buf *bp;
struct xfs_inode *dp;
struct xfs_trans *tp;
struct xfs_mount *mp;
struct xfs_dir2_leaf *leaf;
xfs_dablk_t blkno;
int level;
int error;
int size;
trace_xfs_da_root_split(state->args);
/*
* Copy the existing (incorrect) block from the root node position
* to a free space somewhere.
*/
args = state->args;
error = xfs_da_grow_inode(args, &blkno);
if (error)
return error;
dp = args->dp;
tp = args->trans;
mp = state->mp;
error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
oldroot = blk1->bp->b_addr;
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC)) {
struct xfs_da3_icnode_hdr nodehdr;
dp->d_ops->node_hdr_from_disk(&nodehdr, oldroot);
btree = dp->d_ops->node_tree_p(oldroot);
size = (int)((char *)&btree[nodehdr.count] - (char *)oldroot);
level = nodehdr.level;
/*
* we are about to copy oldroot to bp, so set up the type
* of bp while we know exactly what it will be.
*/
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DA_NODE_BUF);
} else {
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
leaf = (xfs_dir2_leaf_t *)oldroot;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
ents = dp->d_ops->leaf_ents_p(leaf);
ASSERT(leafhdr.magic == XFS_DIR2_LEAFN_MAGIC ||
leafhdr.magic == XFS_DIR3_LEAFN_MAGIC);
size = (int)((char *)&ents[leafhdr.count] - (char *)leaf);
level = 0;
/*
* we are about to copy oldroot to bp, so set up the type
* of bp while we know exactly what it will be.
*/
xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DIR_LEAFN_BUF);
}
/*
* we can copy most of the information in the node from one block to
* another, but for CRC enabled headers we have to make sure that the
* block specific identifiers are kept intact. We update the buffer
* directly for this.
*/
memcpy(node, oldroot, size);
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
struct xfs_da3_intnode *node3 = (struct xfs_da3_intnode *)node;
node3->hdr.info.blkno = cpu_to_be64(bp->b_bn);
}
xfs_trans_log_buf(tp, bp, 0, size - 1);
bp->b_ops = blk1->bp->b_ops;
xfs_trans_buf_copy_type(bp, blk1->bp);
blk1->bp = bp;
blk1->blkno = blkno;
/*
* Set up the new root node.
*/
error = xfs_da3_node_create(args,
(args->whichfork == XFS_DATA_FORK) ? mp->m_dirleafblk : 0,
level + 1, &bp, args->whichfork);
if (error)
return error;
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
btree[0].hashval = cpu_to_be32(blk1->hashval);
btree[0].before = cpu_to_be32(blk1->blkno);
btree[1].hashval = cpu_to_be32(blk2->hashval);
btree[1].before = cpu_to_be32(blk2->blkno);
nodehdr.count = 2;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
#ifdef DEBUG
if (oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
oldroot->hdr.info.magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
ASSERT(blk1->blkno >= mp->m_dirleafblk &&
blk1->blkno < mp->m_dirfreeblk);
ASSERT(blk2->blkno >= mp->m_dirleafblk &&
blk2->blkno < mp->m_dirfreeblk);
}
#endif
/* Header is already logged by xfs_da3_node_create */
xfs_trans_log_buf(tp, bp,
XFS_DA_LOGRANGE(node, btree, sizeof(xfs_da_node_entry_t) * 2));
return 0;
}
/*
* Split the node, rebalance, then add the new entry.
*/
STATIC int /* error */
xfs_da3_node_split(
struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk,
struct xfs_da_state_blk *addblk,
int treelevel,
int *result)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
xfs_dablk_t blkno;
int newcount;
int error;
int useextra;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_split(state->args);
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
/*
* With V2 dirs the extra block is data or freespace.
*/
useextra = state->extravalid && state->args->whichfork == XFS_ATTR_FORK;
newcount = 1 + useextra;
/*
* Do we have to split the node?
*/
if (nodehdr.count + newcount > state->node_ents) {
/*
* Allocate a new node, add to the doubly linked chain of
* nodes, then move some of our excess entries into it.
*/
error = xfs_da_grow_inode(state->args, &blkno);
if (error)
return(error); /* GROT: dir is inconsistent */
error = xfs_da3_node_create(state->args, blkno, treelevel,
&newblk->bp, state->args->whichfork);
if (error)
return(error); /* GROT: dir is inconsistent */
newblk->blkno = blkno;
newblk->magic = XFS_DA_NODE_MAGIC;
xfs_da3_node_rebalance(state, oldblk, newblk);
error = xfs_da3_blk_link(state, oldblk, newblk);
if (error)
return(error);
*result = 1;
} else {
*result = 0;
}
/*
* Insert the new entry(s) into the correct block
* (updating last hashval in the process).
*
* xfs_da3_node_add() inserts BEFORE the given index,
* and as a result of using node_lookup_int() we always
* point to a valid entry (not after one), but a split
* operation always results in a new block whose hashvals
* FOLLOW the current block.
*
* If we had double-split op below us, then add the extra block too.
*/
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (oldblk->index <= nodehdr.count) {
oldblk->index++;
xfs_da3_node_add(state, oldblk, addblk);
if (useextra) {
if (state->extraafter)
oldblk->index++;
xfs_da3_node_add(state, oldblk, &state->extrablk);
state->extravalid = 0;
}
} else {
newblk->index++;
xfs_da3_node_add(state, newblk, addblk);
if (useextra) {
if (state->extraafter)
newblk->index++;
xfs_da3_node_add(state, newblk, &state->extrablk);
state->extravalid = 0;
}
}
return(0);
}
/*
* Balance the btree elements between two intermediate nodes,
* usually one full and one empty.
*
* NOTE: if blk2 is empty, then it will get the upper half of blk1.
*/
STATIC void
xfs_da3_node_rebalance(
struct xfs_da_state *state,
struct xfs_da_state_blk *blk1,
struct xfs_da_state_blk *blk2)
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
struct xfs_da_intnode *tmpnode;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da_node_entry *btree_s;
struct xfs_da_node_entry *btree_d;
struct xfs_da3_icnode_hdr nodehdr1;
struct xfs_da3_icnode_hdr nodehdr2;
struct xfs_trans *tp;
int count;
int tmp;
int swap = 0;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_rebalance(state->args);
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
/*
* Figure out how many entries need to move, and in which direction.
* Swap the nodes around if that makes it simpler.
*/
if (nodehdr1.count > 0 && nodehdr2.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
tmpnode = node1;
node1 = node2;
node2 = tmpnode;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
swap = 1;
}
count = (nodehdr1.count - nodehdr2.count) / 2;
if (count == 0)
return;
tp = state->args->trans;
/*
* Two cases: high-to-low and low-to-high.
*/
if (count > 0) {
/*
* Move elements in node2 up to make a hole.
*/
tmp = nodehdr2.count;
if (tmp > 0) {
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[0];
btree_d = &btree2[count];
memmove(btree_d, btree_s, tmp);
}
/*
* Move the req'd B-tree elements from high in node1 to
* low in node2.
*/
nodehdr2.count += count;
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree1[nodehdr1.count - count];
btree_d = &btree2[0];
memcpy(btree_d, btree_s, tmp);
nodehdr1.count -= count;
} else {
/*
* Move the req'd B-tree elements from low in node2 to
* high in node1.
*/
count = -count;
tmp = count * (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[0];
btree_d = &btree1[nodehdr1.count];
memcpy(btree_d, btree_s, tmp);
nodehdr1.count += count;
xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, btree_d, tmp));
/*
* Move elements in node2 down to fill the hole.
*/
tmp = nodehdr2.count - count;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
btree_s = &btree2[count];
btree_d = &btree2[0];
memmove(btree_d, btree_s, tmp);
nodehdr2.count -= count;
}
/*
* Log header of node 1 and all current bits of node 2.
*/
dp->d_ops->node_hdr_to_disk(node1, &nodehdr1);
xfs_trans_log_buf(tp, blk1->bp,
XFS_DA_LOGRANGE(node1, &node1->hdr, dp->d_ops->node_hdr_size));
dp->d_ops->node_hdr_to_disk(node2, &nodehdr2);
xfs_trans_log_buf(tp, blk2->bp,
XFS_DA_LOGRANGE(node2, &node2->hdr,
dp->d_ops->node_hdr_size +
(sizeof(btree2[0]) * nodehdr2.count)));
/*
* Record the last hashval from each block for upward propagation.
* (note: don't use the swapped node pointers)
*/
if (swap) {
node1 = blk1->bp->b_addr;
node2 = blk2->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr1, node1);
dp->d_ops->node_hdr_from_disk(&nodehdr2, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
}
blk1->hashval = be32_to_cpu(btree1[nodehdr1.count - 1].hashval);
blk2->hashval = be32_to_cpu(btree2[nodehdr2.count - 1].hashval);
/*
* Adjust the expected index for insertion.
*/
if (blk1->index >= nodehdr1.count) {
blk2->index = blk1->index - nodehdr1.count;
blk1->index = nodehdr1.count + 1; /* make it invalid */
}
}
/*
* Add a new entry to an intermediate node.
*/
STATIC void
xfs_da3_node_add(
struct xfs_da_state *state,
struct xfs_da_state_blk *oldblk,
struct xfs_da_state_blk *newblk)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_node_entry *btree;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_add(state->args);
node = oldblk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
ASSERT(oldblk->index >= 0 && oldblk->index <= nodehdr.count);
ASSERT(newblk->blkno != 0);
if (state->args->whichfork == XFS_DATA_FORK)
ASSERT(newblk->blkno >= state->mp->m_dirleafblk &&
newblk->blkno < state->mp->m_dirfreeblk);
/*
* We may need to make some room before we insert the new node.
*/
tmp = 0;
if (oldblk->index < nodehdr.count) {
tmp = (nodehdr.count - oldblk->index) * (uint)sizeof(*btree);
memmove(&btree[oldblk->index + 1], &btree[oldblk->index], tmp);
}
btree[oldblk->index].hashval = cpu_to_be32(newblk->hashval);
btree[oldblk->index].before = cpu_to_be32(newblk->blkno);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &btree[oldblk->index],
tmp + sizeof(*btree)));
nodehdr.count += 1;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, oldblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the oldblk to propagate upwards.
*/
oldblk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
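/*
 * Editor's note: illustrative sketch, not part of the original file. The
 * insertion pattern used by xfs_da3_node_add() above, reduced to plain C
 * over a plain array; the entry_insert() name is hypothetical.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct entry { uint32_t hashval; uint32_t before; };

static void entry_insert(struct entry *btree, int *count, int index,
			 struct entry newent)
{
	/* shift entries at and above "index" up one slot to open a hole */
	if (index < *count)
		memmove(&btree[index + 1], &btree[index],
			(*count - index) * sizeof(*btree));
	btree[index] = newent;
	(*count)++;
}
#endif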
/*========================================================================
* Routines used for shrinking the Btree.
*========================================================================*/
/*
* Deallocate an empty leaf node, remove it from its parent,
* possibly deallocating that block, etc...
*/
int
xfs_da3_join(
struct xfs_da_state *state)
{
struct xfs_da_state_blk *drop_blk;
struct xfs_da_state_blk *save_blk;
int action = 0;
int error;
trace_xfs_da_join(state->args);
drop_blk = &state->path.blk[ state->path.active-1 ];
save_blk = &state->altpath.blk[ state->path.active-1 ];
ASSERT(state->path.blk[0].magic == XFS_DA_NODE_MAGIC);
ASSERT(drop_blk->magic == XFS_ATTR_LEAF_MAGIC ||
drop_blk->magic == XFS_DIR2_LEAFN_MAGIC);
/*
* Walk back up the tree joining/deallocating as necessary.
* When we stop dropping blocks, break out.
*/
for ( ; state->path.active >= 2; drop_blk--, save_blk--,
state->path.active--) {
/*
* See if we can combine the block with a neighbor.
* (action == 0) => no options, just leave
* (action == 1) => coalesce, then unlink
* (action == 2) => block empty, unlink it
*/
switch (drop_blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
error = xfs_attr3_leaf_toosmall(state, &action);
if (error)
return(error);
if (action == 0)
return(0);
xfs_attr3_leaf_unbalance(state, drop_blk, save_blk);
break;
case XFS_DIR2_LEAFN_MAGIC:
error = xfs_dir2_leafn_toosmall(state, &action);
if (error)
return error;
if (action == 0)
return 0;
xfs_dir2_leafn_unbalance(state, drop_blk, save_blk);
break;
case XFS_DA_NODE_MAGIC:
/*
* Remove the offending node, fixup hashvals,
* check for a toosmall neighbor.
*/
xfs_da3_node_remove(state, drop_blk);
xfs_da3_fixhashpath(state, &state->path);
error = xfs_da3_node_toosmall(state, &action);
if (error)
return(error);
if (action == 0)
return 0;
xfs_da3_node_unbalance(state, drop_blk, save_blk);
break;
}
xfs_da3_fixhashpath(state, &state->altpath);
error = xfs_da3_blk_unlink(state, drop_blk, save_blk);
xfs_da_state_kill_altpath(state);
if (error)
return(error);
error = xfs_da_shrink_inode(state->args, drop_blk->blkno,
drop_blk->bp);
drop_blk->bp = NULL;
if (error)
return(error);
}
/*
* We joined all the way to the top. If it turns out that
* we only have one entry in the root, make the child block
* the new root.
*/
xfs_da3_node_remove(state, drop_blk);
xfs_da3_fixhashpath(state, &state->path);
error = xfs_da3_root_join(state, &state->path.blk[0]);
return(error);
}
#ifdef DEBUG
static void
xfs_da_blkinfo_onlychild_validate(struct xfs_da_blkinfo *blkinfo, __u16 level)
{
__be16 magic = blkinfo->magic;
if (level == 1) {
ASSERT(magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
} else {
ASSERT(magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
magic == cpu_to_be16(XFS_DA3_NODE_MAGIC));
}
ASSERT(!blkinfo->forw);
ASSERT(!blkinfo->back);
}
#else /* !DEBUG */
#define xfs_da_blkinfo_onlychild_validate(blkinfo, level)
#endif /* !DEBUG */
/*
* We have only one entry in the root. Copy the only remaining child of
* the old root to block 0 as the new root node.
*/
STATIC int
xfs_da3_root_join(
struct xfs_da_state *state,
struct xfs_da_state_blk *root_blk)
{
struct xfs_da_intnode *oldroot;
struct xfs_da_args *args;
xfs_dablk_t child;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr oldroothdr;
struct xfs_da_node_entry *btree;
int error;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_root_join(state->args);
ASSERT(root_blk->magic == XFS_DA_NODE_MAGIC);
args = state->args;
oldroot = root_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&oldroothdr, oldroot);
ASSERT(oldroothdr.forw == 0);
ASSERT(oldroothdr.back == 0);
/*
* If the root has more than one child, then don't do anything.
*/
if (oldroothdr.count > 1)
return 0;
/*
* Read in the (only) child block, then copy those bytes into
* the root block's buffer and free the original child block.
*/
btree = dp->d_ops->node_tree_p(oldroot);
child = be32_to_cpu(btree[0].before);
ASSERT(child != 0);
error = xfs_da3_node_read(args->trans, dp, child, -1, &bp,
args->whichfork);
if (error)
return error;
xfs_da_blkinfo_onlychild_validate(bp->b_addr, oldroothdr.level);
/*
* This could be copying a leaf back into the root block in the case of
* there only being a single leaf block left in the tree. Hence we have
* to update the b_ops pointer as well to match the buffer type change
* that could occur. For dir3 blocks we also need to update the block
* number in the buffer header.
*/
memcpy(root_blk->bp->b_addr, bp->b_addr, state->blocksize);
root_blk->bp->b_ops = bp->b_ops;
xfs_trans_buf_copy_type(root_blk->bp, bp);
if (oldroothdr.magic == XFS_DA3_NODE_MAGIC) {
struct xfs_da3_blkinfo *da3 = root_blk->bp->b_addr;
da3->blkno = cpu_to_be64(root_blk->bp->b_bn);
}
xfs_trans_log_buf(args->trans, root_blk->bp, 0, state->blocksize - 1);
error = xfs_da_shrink_inode(args, child, bp);
return(error);
}
/*
* Check a node block and its neighbors to see if the block should be
* collapsed into one or the other neighbor. Always keep the block
* with the smaller block number.
* If the current block is over 50% full, don't try to join it, return 0.
* If the block is empty, fill in the state structure and return 2.
* If it can be collapsed, fill in the state structure and return 1.
* If nothing can be done, return 0.
*/
STATIC int
xfs_da3_node_toosmall(
struct xfs_da_state *state,
int *action)
{
struct xfs_da_intnode *node;
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
xfs_dablk_t blkno;
struct xfs_buf *bp;
struct xfs_da3_icnode_hdr nodehdr;
int count;
int forward;
int error;
int retval;
int i;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_toosmall(state->args);
/*
* Check for the degenerate case of the block being over 50% full.
* If so, it's not worth even looking to see if we might be able
* to coalesce with a sibling.
*/
blk = &state->path.blk[ state->path.active-1 ];
info = blk->bp->b_addr;
node = (xfs_da_intnode_t *)info;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (nodehdr.count > (state->node_ents >> 1)) {
*action = 0; /* blk over 50%, don't try to join */
return(0);
}
/*
* Check for the degenerate case of the block being empty.
* If the block is empty, we'll simply delete it, no need to
* coalesce it with a sibling block. We choose (arbitrarily)
* to merge with the forward block unless it is NULL.
*/
if (nodehdr.count == 0) {
/*
* Make altpath point to the block we want to keep and
* path point to the block we want to drop (this one).
*/
forward = (info->forw != 0);
memcpy(&state->altpath, &state->path, sizeof(state->path));
error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
if (error)
return(error);
if (retval) {
*action = 0;
} else {
*action = 2;
}
return(0);
}
/*
* Examine each sibling block to see if we can coalesce with
* at least 25% free space to spare. We need to figure out
* whether to merge with the forward or the backward block.
* We prefer coalescing with the lower numbered sibling so as
* to shrink a directory over time.
*/
count = state->node_ents;
count -= state->node_ents >> 2;
count -= nodehdr.count;
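/*
 * Editor's note (worked example): with state->node_ents == 64 the
 * combined limit is 64 - 16 = 48 entries, so a block holding 10
 * entries (count == 38 at this point) may merge with any sibling
 * holding at most 38, leaving at least 25% of the result free.
 */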
/* start with smaller blk num */
forward = nodehdr.forw < nodehdr.back;
for (i = 0; i < 2; forward = !forward, i++) {
struct xfs_da3_icnode_hdr thdr;
if (forward)
blkno = nodehdr.forw;
else
blkno = nodehdr.back;
if (blkno == 0)
continue;
error = xfs_da3_node_read(state->args->trans, dp,
blkno, -1, &bp, state->args->whichfork);
if (error)
return(error);
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&thdr, node);
xfs_trans_brelse(state->args->trans, bp);
if (count - thdr.count >= 0)
break; /* fits with at least 25% to spare */
}
if (i >= 2) {
*action = 0;
return 0;
}
/*
* Make altpath point to the block we want to keep (the lower
* numbered block) and path point to the block we want to drop.
*/
memcpy(&state->altpath, &state->path, sizeof(state->path));
if (blkno < blk->blkno) {
error = xfs_da3_path_shift(state, &state->altpath, forward,
0, &retval);
} else {
error = xfs_da3_path_shift(state, &state->path, forward,
0, &retval);
}
if (error)
return error;
if (retval) {
*action = 0;
return 0;
}
*action = 1;
return 0;
}
/*
* Pick up the last hashvalue from an intermediate node.
*/
STATIC uint
xfs_da3_node_lasthash(
struct xfs_inode *dp,
struct xfs_buf *bp,
int *count)
{
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
node = bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
if (count)
*count = nodehdr.count;
if (!nodehdr.count)
return 0;
btree = dp->d_ops->node_tree_p(node);
return be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
/*
* Walk back up the tree adjusting hash values as necessary,
* when we stop making changes, return.
*/
void
xfs_da3_fixhashpath(
struct xfs_da_state *state,
struct xfs_da_state_path *path)
{
struct xfs_da_state_blk *blk;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
xfs_dahash_t lasthash = 0;
int level;
int count;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_fixhashpath(state->args);
level = path->active-1;
blk = &path->blk[ level ];
switch (blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
lasthash = xfs_attr_leaf_lasthash(blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DIR2_LEAFN_MAGIC:
lasthash = xfs_dir2_leafn_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
case XFS_DA_NODE_MAGIC:
lasthash = xfs_da3_node_lasthash(dp, blk->bp, &count);
if (count == 0)
return;
break;
}
for (blk--, level--; level >= 0; blk--, level--) {
struct xfs_da3_icnode_hdr nodehdr;
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (be32_to_cpu(btree[blk->index].hashval) == lasthash)
break;
blk->hashval = lasthash;
btree[blk->index].hashval = cpu_to_be32(lasthash);
xfs_trans_log_buf(state->args->trans, blk->bp,
XFS_DA_LOGRANGE(node, &btree[blk->index],
sizeof(*btree)));
lasthash = be32_to_cpu(btree[nodehdr.count - 1].hashval);
}
}
/*
* Remove an entry from an intermediate node.
*/
STATIC void
xfs_da3_node_remove(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk)
{
struct xfs_da_intnode *node;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_node_entry *btree;
int index;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_remove(state->args);
node = drop_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
ASSERT(drop_blk->index < nodehdr.count);
ASSERT(drop_blk->index >= 0);
/*
* Copy over the offending entry, or just zero it out.
*/
index = drop_blk->index;
btree = dp->d_ops->node_tree_p(node);
if (index < nodehdr.count - 1) {
tmp = nodehdr.count - index - 1;
tmp *= (uint)sizeof(xfs_da_node_entry_t);
memmove(&btree[index], &btree[index + 1], tmp);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], tmp));
index = nodehdr.count - 1;
}
memset(&btree[index], 0, sizeof(xfs_da_node_entry_t));
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &btree[index], sizeof(btree[index])));
nodehdr.count -= 1;
dp->d_ops->node_hdr_to_disk(node, &nodehdr);
xfs_trans_log_buf(state->args->trans, drop_blk->bp,
XFS_DA_LOGRANGE(node, &node->hdr, dp->d_ops->node_hdr_size));
/*
* Copy the last hash value from the block to propagate upwards.
*/
drop_blk->hashval = be32_to_cpu(btree[index - 1].hashval);
}
/*
* Unbalance the elements between two intermediate nodes,
* move all Btree elements from one node into another.
*/
STATIC void
xfs_da3_node_unbalance(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk)
{
struct xfs_da_intnode *drop_node;
struct xfs_da_intnode *save_node;
struct xfs_da_node_entry *drop_btree;
struct xfs_da_node_entry *save_btree;
struct xfs_da3_icnode_hdr drop_hdr;
struct xfs_da3_icnode_hdr save_hdr;
struct xfs_trans *tp;
int sindex;
int tmp;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_node_unbalance(state->args);
drop_node = drop_blk->bp->b_addr;
save_node = save_blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&drop_hdr, drop_node);
dp->d_ops->node_hdr_from_disk(&save_hdr, save_node);
drop_btree = dp->d_ops->node_tree_p(drop_node);
save_btree = dp->d_ops->node_tree_p(save_node);
tp = state->args->trans;
/*
* If the dying block has lower hashvals, then move all the
* elements in the remaining block up to make a hole.
*/
if ((be32_to_cpu(drop_btree[0].hashval) <
be32_to_cpu(save_btree[0].hashval)) ||
(be32_to_cpu(drop_btree[drop_hdr.count - 1].hashval) <
be32_to_cpu(save_btree[save_hdr.count - 1].hashval))) {
/* XXX: check this - is memmove dst correct? */
tmp = save_hdr.count * sizeof(xfs_da_node_entry_t);
memmove(&save_btree[drop_hdr.count], &save_btree[0], tmp);
sindex = 0;
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_btree[0],
(save_hdr.count + drop_hdr.count) *
sizeof(xfs_da_node_entry_t)));
} else {
sindex = save_hdr.count;
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_btree[sindex],
drop_hdr.count * sizeof(xfs_da_node_entry_t)));
}
/*
* Move all the B-tree elements from drop_blk to save_blk.
*/
tmp = drop_hdr.count * (uint)sizeof(xfs_da_node_entry_t);
memcpy(&save_btree[sindex], &drop_btree[0], tmp);
save_hdr.count += drop_hdr.count;
dp->d_ops->node_hdr_to_disk(save_node, &save_hdr);
xfs_trans_log_buf(tp, save_blk->bp,
XFS_DA_LOGRANGE(save_node, &save_node->hdr,
dp->d_ops->node_hdr_size));
/*
* Save the last hashval in the remaining block for upward propagation.
*/
save_blk->hashval = be32_to_cpu(save_btree[save_hdr.count - 1].hashval);
}
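/*
 * Merge orientation example (illustrative hashvals): if the dying
 * block holds [5 7] and the survivor holds [9 12], the survivor's
 * entries are shifted up to open a hole at the front and the result
 * is [5 7 9 12]; when the survivor is the lower block, the dying
 * entries are simply appended instead.
 */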
/*========================================================================
* Routines used for finding things in the Btree.
*========================================================================*/
/*
* Walk down the Btree looking for a particular filename, filling
* in the state structure as we go.
*
* We will set the state structure to point to each of the elements
* in each of the nodes where either the hashval is or should be.
*
 * We support duplicate hashvals, so for each entry in the current
* node that could contain the desired hashval, descend. This is a
* pruned depth-first tree search.
*/
int /* error */
xfs_da3_node_lookup_int(
struct xfs_da_state *state,
int *result)
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *curr;
struct xfs_da_intnode *node;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
struct xfs_da_args *args;
xfs_dablk_t blkno;
xfs_dahash_t hashval;
xfs_dahash_t btreehashval;
int probe;
int span;
int max;
int error;
int retval;
struct xfs_inode *dp = state->args->dp;
args = state->args;
/*
* Descend thru the B-tree searching each level for the right
* node to use, until the right hashval is found.
*/
	blkno = (args->whichfork == XFS_DATA_FORK) ? state->mp->m_dirleafblk : 0;
for (blk = &state->path.blk[0], state->path.active = 1;
state->path.active <= XFS_DA_NODE_MAXDEPTH;
blk++, state->path.active++) {
/*
* Read the next node down in the tree.
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, args->dp, blkno,
-1, &blk->bp, args->whichfork);
if (error) {
blk->blkno = 0;
state->path.active--;
return(error);
}
curr = blk->bp->b_addr;
blk->magic = be16_to_cpu(curr->magic);
if (blk->magic == XFS_ATTR_LEAF_MAGIC ||
blk->magic == XFS_ATTR3_LEAF_MAGIC) {
blk->magic = XFS_ATTR_LEAF_MAGIC;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
}
if (blk->magic == XFS_DIR2_LEAFN_MAGIC ||
blk->magic == XFS_DIR3_LEAFN_MAGIC) {
blk->magic = XFS_DIR2_LEAFN_MAGIC;
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
blk->bp, NULL);
break;
}
blk->magic = XFS_DA_NODE_MAGIC;
/*
* Search an intermediate node for a match.
*/
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
max = nodehdr.count;
blk->hashval = be32_to_cpu(btree[max - 1].hashval);
/*
* Binary search. (note: small blocks will skip loop)
*/
probe = span = max / 2;
hashval = args->hashval;
while (span > 4) {
span /= 2;
btreehashval = be32_to_cpu(btree[probe].hashval);
if (btreehashval < hashval)
probe += span;
else if (btreehashval > hashval)
probe -= span;
else
break;
}
ASSERT((probe >= 0) && (probe < max));
ASSERT((span <= 4) ||
(be32_to_cpu(btree[probe].hashval) == hashval));
/*
		 * Since we may have duplicate hashvals, find the first
* matching hashval in the node.
*/
while (probe > 0 &&
be32_to_cpu(btree[probe].hashval) >= hashval) {
probe--;
}
while (probe < max &&
be32_to_cpu(btree[probe].hashval) < hashval) {
probe++;
}
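		/*
		 * Example: with hashvals [10 20 20 20 30] and a search
		 * value of 20, the binary search may leave probe on any
		 * of the three 20s; the scans above back it up below them
		 * and then forward to the first 20 (index 1), keeping the
		 * later duplicates reachable via path shifts.
		 */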
/*
* Pick the right block to descend on.
*/
if (probe == max) {
blk->index = max - 1;
blkno = be32_to_cpu(btree[max - 1].before);
} else {
blk->index = probe;
blkno = be32_to_cpu(btree[probe].before);
}
}
/*
* A leaf block that ends in the hashval that we are interested in
* (final hashval == search hashval) means that the next block may
 * contain more entries with the same hashval; shift forward to the
* next leaf and keep searching.
*/
for (;;) {
if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
&blk->index, state);
} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
retval = xfs_attr3_leaf_lookup_int(blk->bp, args);
blk->index = args->index;
args->blkno = blk->blkno;
} else {
ASSERT(0);
return XFS_ERROR(EFSCORRUPTED);
}
if (((retval == ENOENT) || (retval == ENOATTR)) &&
(blk->hashval == args->hashval)) {
error = xfs_da3_path_shift(state, &state->path, 1, 1,
&retval);
if (error)
return(error);
if (retval == 0) {
continue;
} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
/* path_shift() gives ENOENT */
retval = XFS_ERROR(ENOATTR);
}
}
break;
}
*result = retval;
return(0);
}
/*========================================================================
* Utility routines.
*========================================================================*/
/*
* Compare two intermediate nodes for "order".
*/
STATIC int
xfs_da3_node_order(
struct xfs_inode *dp,
struct xfs_buf *node1_bp,
struct xfs_buf *node2_bp)
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da3_icnode_hdr node1hdr;
struct xfs_da3_icnode_hdr node2hdr;
node1 = node1_bp->b_addr;
node2 = node2_bp->b_addr;
dp->d_ops->node_hdr_from_disk(&node1hdr, node1);
dp->d_ops->node_hdr_from_disk(&node2hdr, node2);
btree1 = dp->d_ops->node_tree_p(node1);
btree2 = dp->d_ops->node_tree_p(node2);
if (node1hdr.count > 0 && node2hdr.count > 0 &&
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[node2hdr.count - 1].hashval) <
be32_to_cpu(btree1[node1hdr.count - 1].hashval)))) {
return 1;
}
return 0;
}
/*
* Link a new block into a doubly linked list of blocks (of whatever type).
*/
int /* error */
xfs_da3_blk_link(
struct xfs_da_state *state,
struct xfs_da_state_blk *old_blk,
struct xfs_da_state_blk *new_blk)
{
struct xfs_da_blkinfo *old_info;
struct xfs_da_blkinfo *new_info;
struct xfs_da_blkinfo *tmp_info;
struct xfs_da_args *args;
struct xfs_buf *bp;
int before = 0;
int error;
struct xfs_inode *dp = state->args->dp;
/*
* Set up environment.
*/
args = state->args;
ASSERT(args != NULL);
old_info = old_blk->bp->b_addr;
new_info = new_blk->bp->b_addr;
ASSERT(old_blk->magic == XFS_DA_NODE_MAGIC ||
old_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
old_blk->magic == XFS_ATTR_LEAF_MAGIC);
switch (old_blk->magic) {
case XFS_ATTR_LEAF_MAGIC:
before = xfs_attr_leaf_order(old_blk->bp, new_blk->bp);
break;
case XFS_DIR2_LEAFN_MAGIC:
before = xfs_dir2_leafn_order(dp, old_blk->bp, new_blk->bp);
break;
case XFS_DA_NODE_MAGIC:
before = xfs_da3_node_order(dp, old_blk->bp, new_blk->bp);
break;
}
/*
* Link blocks in appropriate order.
*/
if (before) {
/*
* Link new block in before existing block.
*/
trace_xfs_da_link_before(args);
new_info->forw = cpu_to_be32(old_blk->blkno);
new_info->back = old_info->back;
if (old_info->back) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->back),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == old_info->magic);
ASSERT(be32_to_cpu(tmp_info->forw) == old_blk->blkno);
tmp_info->forw = cpu_to_be32(new_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->back = cpu_to_be32(new_blk->blkno);
} else {
/*
* Link new block in after existing block.
*/
trace_xfs_da_link_after(args);
new_info->forw = old_info->forw;
new_info->back = cpu_to_be32(old_blk->blkno);
if (old_info->forw) {
error = xfs_da3_node_read(args->trans, dp,
be32_to_cpu(old_info->forw),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == old_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == old_blk->blkno);
tmp_info->back = cpu_to_be32(new_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0, sizeof(*tmp_info)-1);
}
old_info->forw = cpu_to_be32(new_blk->blkno);
}
xfs_trans_log_buf(args->trans, old_blk->bp, 0, sizeof(*tmp_info) - 1);
xfs_trans_log_buf(args->trans, new_blk->bp, 0, sizeof(*tmp_info) - 1);
return(0);
}
/*
* Unlink a block from a doubly linked list of blocks.
*/
STATIC int /* error */
xfs_da3_blk_unlink(
struct xfs_da_state *state,
struct xfs_da_state_blk *drop_blk,
struct xfs_da_state_blk *save_blk)
{
struct xfs_da_blkinfo *drop_info;
struct xfs_da_blkinfo *save_info;
struct xfs_da_blkinfo *tmp_info;
struct xfs_da_args *args;
struct xfs_buf *bp;
int error;
/*
* Set up environment.
*/
args = state->args;
ASSERT(args != NULL);
save_info = save_blk->bp->b_addr;
drop_info = drop_blk->bp->b_addr;
ASSERT(save_blk->magic == XFS_DA_NODE_MAGIC ||
save_blk->magic == XFS_DIR2_LEAFN_MAGIC ||
save_blk->magic == XFS_ATTR_LEAF_MAGIC);
ASSERT(save_blk->magic == drop_blk->magic);
ASSERT((be32_to_cpu(save_info->forw) == drop_blk->blkno) ||
(be32_to_cpu(save_info->back) == drop_blk->blkno));
ASSERT((be32_to_cpu(drop_info->forw) == save_blk->blkno) ||
(be32_to_cpu(drop_info->back) == save_blk->blkno));
/*
* Unlink the leaf block from the doubly linked chain of leaves.
*/
if (be32_to_cpu(save_info->back) == drop_blk->blkno) {
trace_xfs_da_unlink_back(args);
save_info->back = drop_info->back;
if (drop_info->back) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->back),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->forw) == drop_blk->blkno);
tmp_info->forw = cpu_to_be32(save_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
}
} else {
trace_xfs_da_unlink_forward(args);
save_info->forw = drop_info->forw;
if (drop_info->forw) {
error = xfs_da3_node_read(args->trans, args->dp,
be32_to_cpu(drop_info->forw),
-1, &bp, args->whichfork);
if (error)
return(error);
ASSERT(bp != NULL);
tmp_info = bp->b_addr;
ASSERT(tmp_info->magic == save_info->magic);
ASSERT(be32_to_cpu(tmp_info->back) == drop_blk->blkno);
tmp_info->back = cpu_to_be32(save_blk->blkno);
xfs_trans_log_buf(args->trans, bp, 0,
sizeof(*tmp_info) - 1);
}
}
xfs_trans_log_buf(args->trans, save_blk->bp, 0, sizeof(*save_info) - 1);
return(0);
}
/*
* Move a path "forward" or "!forward" one block at the current level.
*
* This routine will adjust a "path" to point to the next block
* "forward" (higher hashvalues) or "!forward" (lower hashvals) in the
* Btree, including updating pointers to the intermediate nodes between
* the new bottom and the root.
*/
int /* error */
xfs_da3_path_shift(
struct xfs_da_state *state,
struct xfs_da_state_path *path,
int forward,
int release,
int *result)
{
struct xfs_da_state_blk *blk;
struct xfs_da_blkinfo *info;
struct xfs_da_intnode *node;
struct xfs_da_args *args;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr nodehdr;
xfs_dablk_t blkno = 0;
int level;
int error;
struct xfs_inode *dp = state->args->dp;
trace_xfs_da_path_shift(state->args);
/*
* Roll up the Btree looking for the first block where our
* current index is not at the edge of the block. Note that
* we skip the bottom layer because we want the sibling block.
*/
args = state->args;
ASSERT(args != NULL);
ASSERT(path != NULL);
ASSERT((path->active > 0) && (path->active < XFS_DA_NODE_MAXDEPTH));
level = (path->active-1) - 1; /* skip bottom layer in path */
for (blk = &path->blk[level]; level >= 0; blk--, level--) {
node = blk->bp->b_addr;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
if (forward && (blk->index < nodehdr.count - 1)) {
blk->index++;
blkno = be32_to_cpu(btree[blk->index].before);
break;
} else if (!forward && (blk->index > 0)) {
blk->index--;
blkno = be32_to_cpu(btree[blk->index].before);
break;
}
}
if (level < 0) {
*result = XFS_ERROR(ENOENT); /* we're out of our tree */
ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
return(0);
}
/*
* Roll down the edge of the subtree until we reach the
* same depth we were at originally.
*/
for (blk++, level++; level < path->active; blk++, level++) {
/*
* Release the old block.
* (if it's dirty, trans won't actually let go)
*/
if (release)
xfs_trans_brelse(args->trans, blk->bp);
/*
* Read the next child block.
*/
blk->blkno = blkno;
error = xfs_da3_node_read(args->trans, dp, blkno, -1,
&blk->bp, args->whichfork);
if (error)
return(error);
info = blk->bp->b_addr;
ASSERT(info->magic == cpu_to_be16(XFS_DA_NODE_MAGIC) ||
info->magic == cpu_to_be16(XFS_DA3_NODE_MAGIC) ||
info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC) ||
info->magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
info->magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
/*
* Note: we flatten the magic number to a single type so we
* don't have to compare against crc/non-crc types elsewhere.
*/
switch (be16_to_cpu(info->magic)) {
case XFS_DA_NODE_MAGIC:
case XFS_DA3_NODE_MAGIC:
blk->magic = XFS_DA_NODE_MAGIC;
node = (xfs_da_intnode_t *)info;
dp->d_ops->node_hdr_from_disk(&nodehdr, node);
btree = dp->d_ops->node_tree_p(node);
blk->hashval = be32_to_cpu(btree[nodehdr.count - 1].hashval);
if (forward)
blk->index = 0;
else
blk->index = nodehdr.count - 1;
blkno = be32_to_cpu(btree[blk->index].before);
break;
case XFS_ATTR_LEAF_MAGIC:
case XFS_ATTR3_LEAF_MAGIC:
blk->magic = XFS_ATTR_LEAF_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
break;
case XFS_DIR2_LEAFN_MAGIC:
case XFS_DIR3_LEAFN_MAGIC:
blk->magic = XFS_DIR2_LEAFN_MAGIC;
ASSERT(level == path->active-1);
blk->index = 0;
blk->hashval = xfs_dir2_leafn_lasthash(args->dp,
blk->bp, NULL);
break;
default:
ASSERT(0);
break;
}
}
*result = 0;
return 0;
}
/*========================================================================
* Utility routines.
*========================================================================*/
/*
* Implement a simple hash on a character string.
* Rotate the hash value by 7 bits, then XOR each character in.
* This is implemented with some source-level loop unrolling.
*/
xfs_dahash_t
xfs_da_hashname(const __uint8_t *name, int namelen)
{
xfs_dahash_t hash;
/*
* Do four characters at a time as long as we can.
*/
for (hash = 0; namelen >= 4; namelen -= 4, name += 4)
hash = (name[0] << 21) ^ (name[1] << 14) ^ (name[2] << 7) ^
(name[3] << 0) ^ rol32(hash, 7 * 4);
/*
* Now do the rest of the characters.
*/
switch (namelen) {
case 3:
return (name[0] << 14) ^ (name[1] << 7) ^ (name[2] << 0) ^
rol32(hash, 7 * 3);
case 2:
return (name[0] << 7) ^ (name[1] << 0) ^ rol32(hash, 7 * 2);
case 1:
return (name[0] << 0) ^ rol32(hash, 7 * 1);
default: /* case 0: */
return hash;
}
}
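/*
 * Worked example for the hash above: for the two-byte name "ab" the
 * unrolled loop is skipped (namelen < 4) and the "case 2" arm returns
 *	('a' << 7) ^ ('b' << 0) ^ rol32(0, 14) = 0x3080 ^ 0x62 = 0x30e2
 * so xfs_da_hashname("ab", 2) == 0x30e2.
 */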
enum xfs_dacmp
xfs_da_compname(
struct xfs_da_args *args,
const unsigned char *name,
int len)
{
return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
}
static xfs_dahash_t
xfs_default_hashname(
struct xfs_name *name)
{
return xfs_da_hashname(name->name, name->len);
}
const struct xfs_nameops xfs_default_nameops = {
.hashname = xfs_default_hashname,
.compname = xfs_da_compname
};
int
xfs_da_grow_inode_int(
struct xfs_da_args *args,
xfs_fileoff_t *bno,
int count)
{
struct xfs_trans *tp = args->trans;
struct xfs_inode *dp = args->dp;
int w = args->whichfork;
xfs_drfsbno_t nblks = dp->i_d.di_nblocks;
struct xfs_bmbt_irec map, *mapp;
int nmap, error, got, i, mapi;
/*
* Find a spot in the file space to put the new block.
*/
error = xfs_bmap_first_unused(tp, dp, count, bno, w);
if (error)
return error;
/*
* Try mapping it in one filesystem block.
*/
nmap = 1;
ASSERT(args->firstblock != NULL);
error = xfs_bmapi_write(tp, dp, *bno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA|XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->flist);
if (error)
return error;
ASSERT(nmap <= 1);
if (nmap == 1) {
mapp = ↦
mapi = 1;
} else if (nmap == 0 && count > 1) {
xfs_fileoff_t b;
int c;
/*
* If we didn't get it and the block might work if fragmented,
* try without the CONTIG flag. Loop until we get it all.
*/
mapp = kmem_alloc(sizeof(*mapp) * count, KM_SLEEP);
for (b = *bno, mapi = 0; b < *bno + count; ) {
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
c = (int)(*bno + count - b);
error = xfs_bmapi_write(tp, dp, b, c,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist);
if (error)
goto out_free_map;
if (nmap < 1)
break;
mapi += nmap;
b = mapp[mapi - 1].br_startoff +
mapp[mapi - 1].br_blockcount;
}
} else {
mapi = 0;
mapp = NULL;
}
/*
* Count the blocks we got, make sure it matches the total.
*/
for (i = 0, got = 0; i < mapi; i++)
got += mapp[i].br_blockcount;
if (got != count || mapp[0].br_startoff != *bno ||
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
*bno + count) {
error = XFS_ERROR(ENOSPC);
goto out_free_map;
}
/* account for newly allocated blocks in reserved blocks total */
args->total -= dp->i_d.di_nblocks - nblks;
out_free_map:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Add a block to the btree ahead of the file.
* Return the new block number to the caller.
*/
int
xfs_da_grow_inode(
struct xfs_da_args *args,
xfs_dablk_t *new_blkno)
{
xfs_fileoff_t bno;
int count;
int error;
trace_xfs_da_grow_inode(args);
if (args->whichfork == XFS_DATA_FORK) {
bno = args->dp->i_mount->m_dirleafblk;
count = args->dp->i_mount->m_dirblkfsbs;
} else {
bno = 0;
count = 1;
}
error = xfs_da_grow_inode_int(args, &bno, count);
if (!error)
*new_blkno = (xfs_dablk_t)bno;
return error;
}
/*
* Ick. We need to always be able to remove a btree block, even
* if there's no space reservation because the filesystem is full.
* This is called if xfs_bunmapi on a btree block fails due to ENOSPC.
* It swaps the target block with the last block in the file. The
 * last block in the file can always be removed, since removing it
 * cannot cause a bmap btree split.
*/
STATIC int
xfs_da3_swap_lastblock(
struct xfs_da_args *args,
xfs_dablk_t *dead_blknop,
struct xfs_buf **dead_bufp)
{
struct xfs_da_blkinfo *dead_info;
struct xfs_da_blkinfo *sib_info;
struct xfs_da_intnode *par_node;
struct xfs_da_intnode *dead_node;
struct xfs_dir2_leaf *dead_leaf2;
struct xfs_da_node_entry *btree;
struct xfs_da3_icnode_hdr par_hdr;
struct xfs_inode *dp;
struct xfs_trans *tp;
struct xfs_mount *mp;
struct xfs_buf *dead_buf;
struct xfs_buf *last_buf;
struct xfs_buf *sib_buf;
struct xfs_buf *par_buf;
xfs_dahash_t dead_hash;
xfs_fileoff_t lastoff;
xfs_dablk_t dead_blkno;
xfs_dablk_t last_blkno;
xfs_dablk_t sib_blkno;
xfs_dablk_t par_blkno;
int error;
int w;
int entno;
int level;
int dead_level;
trace_xfs_da_swap_lastblock(args);
dead_buf = *dead_bufp;
dead_blkno = *dead_blknop;
tp = args->trans;
dp = args->dp;
w = args->whichfork;
ASSERT(w == XFS_DATA_FORK);
mp = dp->i_mount;
lastoff = mp->m_dirfreeblk;
error = xfs_bmap_last_before(tp, dp, &lastoff, w);
if (error)
return error;
if (unlikely(lastoff == 0)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(1)", XFS_ERRLEVEL_LOW,
mp);
return XFS_ERROR(EFSCORRUPTED);
}
/*
* Read the last block in the btree space.
*/
last_blkno = (xfs_dablk_t)lastoff - mp->m_dirblkfsbs;
error = xfs_da3_node_read(tp, dp, last_blkno, -1, &last_buf, w);
if (error)
return error;
/*
* Copy the last block into the dead buffer and log it.
*/
memcpy(dead_buf->b_addr, last_buf->b_addr, mp->m_dirblksize);
xfs_trans_log_buf(tp, dead_buf, 0, mp->m_dirblksize - 1);
dead_info = dead_buf->b_addr;
/*
* Get values from the moved block.
*/
if (dead_info->magic == cpu_to_be16(XFS_DIR2_LEAFN_MAGIC) ||
dead_info->magic == cpu_to_be16(XFS_DIR3_LEAFN_MAGIC)) {
struct xfs_dir3_icleaf_hdr leafhdr;
struct xfs_dir2_leaf_entry *ents;
dead_leaf2 = (xfs_dir2_leaf_t *)dead_info;
dp->d_ops->leaf_hdr_from_disk(&leafhdr, dead_leaf2);
ents = dp->d_ops->leaf_ents_p(dead_leaf2);
dead_level = 0;
dead_hash = be32_to_cpu(ents[leafhdr.count - 1].hashval);
} else {
struct xfs_da3_icnode_hdr deadhdr;
dead_node = (xfs_da_intnode_t *)dead_info;
dp->d_ops->node_hdr_from_disk(&deadhdr, dead_node);
btree = dp->d_ops->node_tree_p(dead_node);
dead_level = deadhdr.level;
dead_hash = be32_to_cpu(btree[deadhdr.count - 1].hashval);
}
sib_buf = par_buf = NULL;
/*
* If the moved block has a left sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->back))) {
error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->forw) != last_blkno ||
sib_info->magic != dead_info->magic)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(2)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
sib_info->forw = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->forw,
sizeof(sib_info->forw)));
sib_buf = NULL;
}
/*
* If the moved block has a right sibling, fix up the pointers.
*/
if ((sib_blkno = be32_to_cpu(dead_info->forw))) {
error = xfs_da3_node_read(tp, dp, sib_blkno, -1, &sib_buf, w);
if (error)
goto done;
sib_info = sib_buf->b_addr;
if (unlikely(
be32_to_cpu(sib_info->back) != last_blkno ||
sib_info->magic != dead_info->magic)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(3)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
sib_info->back = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, sib_buf,
XFS_DA_LOGRANGE(sib_info, &sib_info->back,
sizeof(sib_info->back)));
sib_buf = NULL;
}
par_blkno = mp->m_dirleafblk;
level = -1;
/*
* Walk down the tree looking for the parent of the moved block.
*/
for (;;) {
error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (level >= 0 && level != par_hdr.level + 1) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(4)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
level = par_hdr.level;
btree = dp->d_ops->node_tree_p(par_node);
for (entno = 0;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].hashval) < dead_hash;
entno++)
continue;
if (entno == par_hdr.count) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(5)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
par_blkno = be32_to_cpu(btree[entno].before);
if (level == dead_level + 1)
break;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
}
/*
* We're in the right parent block.
* Look for the right entry.
*/
for (;;) {
for (;
entno < par_hdr.count &&
be32_to_cpu(btree[entno].before) != last_blkno;
entno++)
continue;
if (entno < par_hdr.count)
break;
par_blkno = par_hdr.forw;
xfs_trans_brelse(tp, par_buf);
par_buf = NULL;
if (unlikely(par_blkno == 0)) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(6)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
error = xfs_da3_node_read(tp, dp, par_blkno, -1, &par_buf, w);
if (error)
goto done;
par_node = par_buf->b_addr;
dp->d_ops->node_hdr_from_disk(&par_hdr, par_node);
if (par_hdr.level != level) {
XFS_ERROR_REPORT("xfs_da_swap_lastblock(7)",
XFS_ERRLEVEL_LOW, mp);
error = XFS_ERROR(EFSCORRUPTED);
goto done;
}
btree = dp->d_ops->node_tree_p(par_node);
entno = 0;
}
/*
* Update the parent entry pointing to the moved block.
*/
btree[entno].before = cpu_to_be32(dead_blkno);
xfs_trans_log_buf(tp, par_buf,
XFS_DA_LOGRANGE(par_node, &btree[entno].before,
sizeof(btree[entno].before)));
*dead_blknop = last_blkno;
*dead_bufp = last_buf;
return 0;
done:
if (par_buf)
xfs_trans_brelse(tp, par_buf);
if (sib_buf)
xfs_trans_brelse(tp, sib_buf);
xfs_trans_brelse(tp, last_buf);
return error;
}
/*
* Remove a btree block from a directory or attribute.
*/
int
xfs_da_shrink_inode(
xfs_da_args_t *args,
xfs_dablk_t dead_blkno,
struct xfs_buf *dead_buf)
{
xfs_inode_t *dp;
int done, error, w, count;
xfs_trans_t *tp;
xfs_mount_t *mp;
trace_xfs_da_shrink_inode(args);
dp = args->dp;
w = args->whichfork;
tp = args->trans;
mp = dp->i_mount;
if (w == XFS_DATA_FORK)
count = mp->m_dirblkfsbs;
else
count = 1;
for (;;) {
/*
* Remove extents. If we get ENOSPC for a dir we have to move
* the last block to the place we want to kill.
*/
error = xfs_bunmapi(tp, dp, dead_blkno, count,
xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
0, args->firstblock, args->flist, &done);
if (error == ENOSPC) {
if (w != XFS_DATA_FORK)
break;
error = xfs_da3_swap_lastblock(args, &dead_blkno,
&dead_buf);
if (error)
break;
} else {
break;
}
}
xfs_trans_binval(tp, dead_buf);
return error;
}
/*
* See if the mapping(s) for this btree block are valid, i.e.
* don't contain holes, are logically contiguous, and cover the whole range.
*/
STATIC int
xfs_da_map_covers_blocks(
int nmap,
xfs_bmbt_irec_t *mapp,
xfs_dablk_t bno,
int count)
{
int i;
xfs_fileoff_t off;
for (i = 0, off = bno; i < nmap; i++) {
if (mapp[i].br_startblock == HOLESTARTBLOCK ||
mapp[i].br_startblock == DELAYSTARTBLOCK) {
return 0;
}
if (off != mapp[i].br_startoff) {
return 0;
}
off += mapp[i].br_blockcount;
}
return off == bno + count;
}
/*
* Convert a struct xfs_bmbt_irec to a struct xfs_buf_map.
*
* For the single map case, it is assumed that the caller has provided a pointer
* to a valid xfs_buf_map. For the multiple map case, this function will
* allocate the xfs_buf_map to hold all the maps and replace the caller's single
* map pointer with the allocated map.
*/
static int
xfs_buf_map_from_irec(
struct xfs_mount *mp,
struct xfs_buf_map **mapp,
int *nmaps,
struct xfs_bmbt_irec *irecs,
int nirecs)
{
struct xfs_buf_map *map;
int i;
ASSERT(*nmaps == 1);
ASSERT(nirecs >= 1);
if (nirecs > 1) {
map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
KM_SLEEP | KM_NOFS);
if (!map)
return ENOMEM;
*mapp = map;
}
*nmaps = nirecs;
map = *mapp;
for (i = 0; i < *nmaps; i++) {
ASSERT(irecs[i].br_startblock != DELAYSTARTBLOCK &&
irecs[i].br_startblock != HOLESTARTBLOCK);
map[i].bm_bn = XFS_FSB_TO_DADDR(mp, irecs[i].br_startblock);
map[i].bm_len = XFS_FSB_TO_BB(mp, irecs[i].br_blockcount);
}
return 0;
}
/*
 * Map the block we are given so it is ready for reading. There are three possible return
* values:
* -1 - will be returned if we land in a hole and mappedbno == -2 so the
* caller knows not to execute a subsequent read.
* 0 - if we mapped the block successfully
* >0 - positive error number if there was an error.
*/
static int
xfs_dabuf_map(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
int whichfork,
struct xfs_buf_map **map,
int *nmaps)
{
struct xfs_mount *mp = dp->i_mount;
int nfsb;
int error = 0;
struct xfs_bmbt_irec irec;
struct xfs_bmbt_irec *irecs = &irec;
int nirecs;
ASSERT(map && *map);
ASSERT(*nmaps == 1);
nfsb = (whichfork == XFS_DATA_FORK) ? mp->m_dirblkfsbs : 1;
/*
* Caller doesn't have a mapping. -2 means don't complain
* if we land in a hole.
*/
if (mappedbno == -1 || mappedbno == -2) {
/*
* Optimize the one-block case.
*/
if (nfsb != 1)
irecs = kmem_zalloc(sizeof(irec) * nfsb,
KM_SLEEP | KM_NOFS);
nirecs = nfsb;
error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
&nirecs, xfs_bmapi_aflag(whichfork));
if (error)
goto out;
} else {
irecs->br_startblock = XFS_DADDR_TO_FSB(mp, mappedbno);
irecs->br_startoff = (xfs_fileoff_t)bno;
irecs->br_blockcount = nfsb;
irecs->br_state = 0;
nirecs = 1;
}
if (!xfs_da_map_covers_blocks(nirecs, irecs, bno, nfsb)) {
error = mappedbno == -2 ? -1 : XFS_ERROR(EFSCORRUPTED);
if (unlikely(error == EFSCORRUPTED)) {
if (xfs_error_level >= XFS_ERRLEVEL_LOW) {
int i;
xfs_alert(mp, "%s: bno %lld dir: inode %lld",
__func__, (long long)bno,
(long long)dp->i_ino);
for (i = 0; i < *nmaps; i++) {
xfs_alert(mp,
"[%02d] br_startoff %lld br_startblock %lld br_blockcount %lld br_state %d",
i,
(long long)irecs[i].br_startoff,
(long long)irecs[i].br_startblock,
(long long)irecs[i].br_blockcount,
irecs[i].br_state);
}
}
XFS_ERROR_REPORT("xfs_da_do_buf(1)",
XFS_ERRLEVEL_LOW, mp);
}
goto out;
}
error = xfs_buf_map_from_irec(mp, map, nmaps, irecs, nirecs);
out:
if (irecs != &irec)
kmem_free(irecs);
return error;
}
/*
* Get a buffer for the dir/attr block.
*/
int
xfs_da_get_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork)
{
struct xfs_buf *bp;
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
*bpp = NULL;
mapp = ↦
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
bp = xfs_trans_get_buf_map(trans, dp->i_mount->m_ddev_targp,
mapp, nmap, 0);
error = bp ? bp->b_error : XFS_ERROR(EIO);
if (error) {
xfs_trans_brelse(trans, bp);
goto out_free;
}
*bpp = bp;
out_free:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Get a buffer for the dir/attr block, fill in the contents.
*/
int
xfs_da_read_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
struct xfs_buf **bpp,
int whichfork,
const struct xfs_buf_ops *ops)
{
struct xfs_buf *bp;
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
*bpp = NULL;
mapp = ↦
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
error = xfs_trans_read_buf_map(dp->i_mount, trans,
dp->i_mount->m_ddev_targp,
mapp, nmap, 0, &bp, ops);
if (error)
goto out_free;
if (whichfork == XFS_ATTR_FORK)
xfs_buf_set_ref(bp, XFS_ATTR_BTREE_REF);
else
xfs_buf_set_ref(bp, XFS_DIR_BTREE_REF);
/*
* This verification code will be moved to a CRC verification callback
* function so just leave it here unchanged until then.
*/
{
xfs_dir2_data_hdr_t *hdr = bp->b_addr;
xfs_dir2_free_t *free = bp->b_addr;
xfs_da_blkinfo_t *info = bp->b_addr;
uint magic, magic1;
struct xfs_mount *mp = dp->i_mount;
magic = be16_to_cpu(info->magic);
magic1 = be32_to_cpu(hdr->magic);
if (unlikely(
XFS_TEST_ERROR((magic != XFS_DA_NODE_MAGIC) &&
(magic != XFS_DA3_NODE_MAGIC) &&
(magic != XFS_ATTR_LEAF_MAGIC) &&
(magic != XFS_ATTR3_LEAF_MAGIC) &&
(magic != XFS_DIR2_LEAF1_MAGIC) &&
(magic != XFS_DIR3_LEAF1_MAGIC) &&
(magic != XFS_DIR2_LEAFN_MAGIC) &&
(magic != XFS_DIR3_LEAFN_MAGIC) &&
(magic1 != XFS_DIR2_BLOCK_MAGIC) &&
(magic1 != XFS_DIR3_BLOCK_MAGIC) &&
(magic1 != XFS_DIR2_DATA_MAGIC) &&
(magic1 != XFS_DIR3_DATA_MAGIC) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR2_FREE_MAGIC)) &&
(free->hdr.magic !=
cpu_to_be32(XFS_DIR3_FREE_MAGIC)),
mp, XFS_ERRTAG_DA_READ_BUF,
XFS_RANDOM_DA_READ_BUF))) {
trace_xfs_da_btree_corrupt(bp, _RET_IP_);
XFS_CORRUPTION_ERROR("xfs_da_do_buf(2)",
XFS_ERRLEVEL_LOW, mp, info);
error = XFS_ERROR(EFSCORRUPTED);
xfs_trans_brelse(trans, bp);
goto out_free;
}
}
*bpp = bp;
out_free:
if (mapp != &map)
kmem_free(mapp);
return error;
}
/*
* Readahead the dir/attr block.
*/
xfs_daddr_t
xfs_da_reada_buf(
struct xfs_trans *trans,
struct xfs_inode *dp,
xfs_dablk_t bno,
xfs_daddr_t mappedbno,
int whichfork,
const struct xfs_buf_ops *ops)
{
struct xfs_buf_map map;
struct xfs_buf_map *mapp;
int nmap;
int error;
mapp = ↦
nmap = 1;
error = xfs_dabuf_map(trans, dp, bno, mappedbno, whichfork,
&mapp, &nmap);
if (error) {
/* mapping a hole is not an error, but we don't continue */
if (error == -1)
error = 0;
goto out_free;
}
mappedbno = mapp[0].bm_bn;
xfs_buf_readahead_map(dp->i_mount->m_ddev_targp, mapp, nmap, ops);
out_free:
if (mapp != &map)
kmem_free(mapp);
if (error)
return -1;
return mappedbno;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_2285_0 |
crossvul-cpp_data_good_4709_0 | /*
* dhcp.c Functions to send/receive dhcp packets.
*
* Version: $Id$
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*
* Copyright 2008 The FreeRADIUS server project
* Copyright 2008 Alan DeKok <aland@deployingradius.com>
*/
#include <freeradius-devel/ident.h>
RCSID("$Id$")
#include <freeradius-devel/libradius.h>
#include <freeradius-devel/udpfromto.h>
#include <freeradius-devel/dhcp.h>
#ifdef WITH_DHCP
#define DHCP_CHADDR_LEN (16)
#define DHCP_SNAME_LEN (64)
#define DHCP_FILE_LEN (128)
#define DHCP_VEND_LEN (308)
#define DHCP_OPTION_MAGIC_NUMBER (0x63825363)
#ifndef INADDR_BROADCAST
#define INADDR_BROADCAST INADDR_NONE
#endif
typedef struct dhcp_packet_t {
uint8_t opcode;
uint8_t htype;
uint8_t hlen;
uint8_t hops;
uint32_t xid; /* 4 */
uint16_t secs; /* 8 */
uint16_t flags;
uint32_t ciaddr; /* 12 */
uint32_t yiaddr; /* 16 */
uint32_t siaddr; /* 20 */
uint32_t giaddr; /* 24 */
uint8_t chaddr[DHCP_CHADDR_LEN]; /* 28 */
uint8_t sname[DHCP_SNAME_LEN]; /* 44 */
uint8_t file[DHCP_FILE_LEN]; /* 108 */
uint32_t option_format; /* 236 */
uint8_t options[DHCP_VEND_LEN];
} dhcp_packet_t;
typedef struct dhcp_option_t {
uint8_t code;
uint8_t length;
} dhcp_option_t;
/*
* INADDR_ANY : 68 -> INADDR_BROADCAST : 67 DISCOVER
* INADDR_BROADCAST : 68 <- SERVER_IP : 67 OFFER
* INADDR_ANY : 68 -> INADDR_BROADCAST : 67 REQUEST
* INADDR_BROADCAST : 68 <- SERVER_IP : 67 ACK
*/
static const char *dhcp_header_names[] = {
"DHCP-Opcode",
"DHCP-Hardware-Type",
"DHCP-Hardware-Address-Length",
"DHCP-Hop-Count",
"DHCP-Transaction-Id",
"DHCP-Number-of-Seconds",
"DHCP-Flags",
"DHCP-Client-IP-Address",
"DHCP-Your-IP-Address",
"DHCP-Server-IP-Address",
"DHCP-Gateway-IP-Address",
"DHCP-Client-Hardware-Address",
"DHCP-Server-Host-Name",
"DHCP-Boot-Filename",
NULL
};
static const char *dhcp_message_types[] = {
"invalid",
"DHCP-Discover",
"DHCP-Offer",
"DHCP-Request",
"DHCP-Decline",
"DHCP-Ack",
"DHCP-NAK",
"DHCP-Release",
"DHCP-Inform",
"DHCP-Force-Renew",
};
static int dhcp_header_sizes[] = {
1, 1, 1, 1,
4, 2, 2, 4,
4, 4, 4,
DHCP_CHADDR_LEN,
DHCP_SNAME_LEN,
DHCP_FILE_LEN
};
/*
* Some clients silently ignore responses less than 300 bytes.
*/
#define MIN_PACKET_SIZE (244)
#define DEFAULT_PACKET_SIZE (300)
#define MAX_PACKET_SIZE (1500 - 40)
#define DHCP_OPTION_FIELD (0)
#define DHCP_FILE_FIELD (1)
#define DHCP_SNAME_FIELD (2)
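/*
 *	Find a DHCP option inside a packet.  This walks the variable-length
 *	options field, skipping padding (0) until the end marker (255), and
 *	honours RFC 2132 option 52 ("option overload"), which lets the
 *	otherwise-unused sname and/or file header fields carry options too.
 *	Returns a pointer to the option header (code, length, value...) or
 *	NULL if the option is absent or the encoding is malformed.
 */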
static uint8_t *dhcp_get_option(dhcp_packet_t *packet, size_t packet_size,
unsigned int option)
{
int overload = 0;
int field = DHCP_OPTION_FIELD;
size_t where, size;
uint8_t *data = packet->options;
where = 0;
size = packet_size - offsetof(dhcp_packet_t, options);
data = &packet->options[where];
while (where < size) {
if (data[0] == 0) { /* padding */
where++;
continue;
}
if (data[0] == 255) { /* end of options */
if ((field == DHCP_OPTION_FIELD) &&
(overload & DHCP_FILE_FIELD)) {
data = packet->file;
where = 0;
size = sizeof(packet->file);
field = DHCP_FILE_FIELD;
continue;
			} else if ((field == DHCP_FILE_FIELD) &&
				   (overload & DHCP_SNAME_FIELD)) {
data = packet->sname;
where = 0;
size = sizeof(packet->sname);
field = DHCP_SNAME_FIELD;
continue;
}
return NULL;
}
/*
* We MUST have a real option here.
*/
if ((where + 2) > size) {
fr_strerror_printf("Options overflow field at %u",
(unsigned int) (data - (uint8_t *) packet));
return NULL;
}
if ((where + 2 + data[1]) > size) {
fr_strerror_printf("Option length overflows field at %u",
(unsigned int) (data - (uint8_t *) packet));
return NULL;
}
if (data[0] == option) return data;
		if (data[0] == 52) { /* overload sname and/or file */
			overload = data[2]; /* value byte of the option */
		}
where += data[1] + 2;
data += data[1] + 2;
}
return NULL;
}
/*
* DHCPv4 is only for IPv4. Broadcast only works if udpfromto is
* defined.
*/
RADIUS_PACKET *fr_dhcp_recv(int sockfd)
{
uint32_t magic;
struct sockaddr_storage src;
struct sockaddr_storage dst;
socklen_t sizeof_src;
socklen_t sizeof_dst;
RADIUS_PACKET *packet;
int port;
uint8_t *code;
packet = rad_alloc(0);
if (!packet) {
fr_strerror_printf("Failed allocating packet");
return NULL;
}
	memset(packet, 0, sizeof(*packet));
packet->data = malloc(MAX_PACKET_SIZE);
if (!packet->data) {
fr_strerror_printf("Failed in malloc");
rad_free(&packet);
return NULL;
}
packet->sockfd = sockfd;
sizeof_src = sizeof(src);
#ifdef WITH_UDPFROMTO
sizeof_dst = sizeof(dst);
packet->data_len = recvfromto(sockfd, packet->data, MAX_PACKET_SIZE, 0,
(struct sockaddr *)&src, &sizeof_src,
(struct sockaddr *)&dst, &sizeof_dst);
#else
packet->data_len = recvfrom(sockfd, packet->data, MAX_PACKET_SIZE, 0,
(struct sockaddr *)&src, &sizeof_src);
#endif
if (packet->data_len <= 0) {
fr_strerror_printf("Failed reading DHCP socket: %s", strerror(errno));
rad_free(&packet);
return NULL;
}
if (packet->data_len < MIN_PACKET_SIZE) {
fr_strerror_printf("DHCP packet is too small (%d < %d)",
packet->data_len, MIN_PACKET_SIZE);
rad_free(&packet);
return NULL;
}
if (packet->data[1] != 1) {
fr_strerror_printf("DHCP can only receive ethernet requests, not type %02x",
packet->data[1]);
rad_free(&packet);
return NULL;
}
if (packet->data[2] != 6) {
fr_strerror_printf("Ethernet HW length is wrong length %d",
packet->data[2]);
rad_free(&packet);
return NULL;
}
memcpy(&magic, packet->data + 236, 4);
magic = ntohl(magic);
if (magic != DHCP_OPTION_MAGIC_NUMBER) {
fr_strerror_printf("Cannot do BOOTP");
rad_free(&packet);
return NULL;
}
/*
* Create unique keys for the packet.
*/
memcpy(&magic, packet->data + 4, 4);
packet->id = ntohl(magic);
code = dhcp_get_option((dhcp_packet_t *) packet->data,
packet->data_len, 53);
if (!code) {
fr_strerror_printf("No message-type option was found in the packet");
rad_free(&packet);
return NULL;
}
if ((code[1] < 1) || (code[2] == 0) || (code[2] > 8)) {
fr_strerror_printf("Unknown value for message-type option");
rad_free(&packet);
return NULL;
}
packet->code = code[2] | PW_DHCP_OFFSET;
/*
* Create a unique vector from the MAC address and the
* DHCP opcode. This is a hack for the RADIUS
* infrastructure in the rest of the server.
*
* Note: packet->data[2] == 6, which is smaller than
* sizeof(packet->vector)
*
* FIXME: Look for client-identifier in packet,
* and use that, too?
*/
memset(packet->vector, 0, sizeof(packet->vector));
memcpy(packet->vector, packet->data + 28, packet->data[2]);
packet->vector[packet->data[2]] = packet->code & 0xff;
/*
* FIXME: for DISCOVER / REQUEST: src_port == dst_port + 1
* FIXME: for OFFER / ACK : src_port = dst_port - 1
*/
/*
* Unique keys are xid, client mac, and client ID?
*/
/*
* FIXME: More checks, like DHCP packet type?
*/
sizeof_dst = sizeof(dst);
#ifndef WITH_UDPFROMTO
/*
* This should never fail...
*/
getsockname(sockfd, (struct sockaddr *) &dst, &sizeof_dst);
#endif
fr_sockaddr2ipaddr(&dst, sizeof_dst, &packet->dst_ipaddr, &port);
packet->dst_port = port;
fr_sockaddr2ipaddr(&src, sizeof_src, &packet->src_ipaddr, &port);
packet->src_port = port;
if (fr_debug_flag > 1) {
char type_buf[64];
const char *name = type_buf;
char src_ip_buf[256], dst_ip_buf[256];
if ((packet->code >= PW_DHCP_DISCOVER) &&
(packet->code <= PW_DHCP_INFORM)) {
name = dhcp_message_types[packet->code - PW_DHCP_OFFSET];
} else {
snprintf(type_buf, sizeof(type_buf), "%d",
packet->code - PW_DHCP_OFFSET);
}
DEBUG("Received %s of id %08x from %s:%d to %s:%d",
name, (unsigned int) packet->id,
inet_ntop(packet->src_ipaddr.af,
&packet->src_ipaddr.ipaddr,
src_ip_buf, sizeof(src_ip_buf)),
packet->src_port,
inet_ntop(packet->dst_ipaddr.af,
&packet->dst_ipaddr.ipaddr,
dst_ip_buf, sizeof(dst_ip_buf)),
packet->dst_port);
}
return packet;
}
/*
* Send a DHCP packet.
*/
int fr_dhcp_send(RADIUS_PACKET *packet)
{
struct sockaddr_storage dst;
socklen_t sizeof_dst;
#ifdef WITH_UDPFROMTO
struct sockaddr_storage src;
socklen_t sizeof_src;
#endif
fr_ipaddr2sockaddr(&packet->dst_ipaddr, packet->dst_port,
&dst, &sizeof_dst);
#ifndef WITH_UDPFROMTO
/*
* Assume that the packet is encoded before sending it.
*/
return sendto(packet->sockfd, packet->data, packet->data_len, 0,
(struct sockaddr *)&dst, sizeof_dst);
#else
fr_ipaddr2sockaddr(&packet->src_ipaddr, packet->src_port,
&src, &sizeof_src);
return sendfromto(packet->sockfd,
packet->data, packet->data_len, 0,
(struct sockaddr *)&src, sizeof_src,
(struct sockaddr *)&dst, sizeof_dst);
#endif
}
static int fr_dhcp_attr2vp(VALUE_PAIR *vp, const uint8_t *p, size_t alen);
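/*
 *	Decode a DHCP sub-option blob into a chain of VPs.  The data is a
 *	sequence of { sub-code, length, value... } records, e.g. for option
 *	82 (relay agent information) the bytes 01 04 de ad be ef are
 *	sub-option 1 (circuit-id) with a 4-byte value.  A blob that is not
 *	well formed is kept as a single opaque TLV instead.
 */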
static int decode_tlv(VALUE_PAIR *tlv, const uint8_t *data, size_t data_len)
{
const uint8_t *p;
VALUE_PAIR *head, **tail, *vp;
/*
* Take a pass at parsing it.
*/
p = data;
while (p < (data + data_len)) {
if ((p + 2) > (data + data_len)) goto make_tlv;
if ((p + p[1] + 2) > (data + data_len)) goto make_tlv;
p += 2 + p[1];
}
/*
* Got here... must be well formed.
*/
head = NULL;
tail = &head;
p = data;
while (p < (data + data_len)) {
vp = paircreate(tlv->attribute | (p[0] << 8), PW_TYPE_OCTETS);
if (!vp) {
pairfree(&head);
goto make_tlv;
}
if (fr_dhcp_attr2vp(vp, p + 2, p[1]) < 0) {
pairfree(&head);
goto make_tlv;
}
*tail = vp;
tail = &(vp->next);
p += 2 + p[1];
}
/*
* The caller allocated TLV, so we need to copy the FIRST
* attribute over top of that.
*/
memcpy(tlv, head, sizeof(*tlv));
head->next = NULL;
pairfree(&head);
return 0;
make_tlv:
tlv->vp_tlv = malloc(data_len);
if (!tlv->vp_tlv) {
fr_strerror_printf("No memory");
return -1;
}
memcpy(tlv->vp_tlv, data, data_len);
tlv->length = data_len;
return 0;
}
/*
* Decode ONE value into a VP
*/
static int fr_dhcp_attr2vp(VALUE_PAIR *vp, const uint8_t *p, size_t alen)
{
switch (vp->type) {
case PW_TYPE_BYTE:
if (alen != 1) goto raw;
vp->vp_integer = p[0];
break;
case PW_TYPE_SHORT:
if (alen != 2) goto raw;
vp->vp_integer = (p[0] << 8) | p[1];
break;
case PW_TYPE_INTEGER:
if (alen != 4) goto raw;
memcpy(&vp->vp_integer, p, 4);
vp->vp_integer = ntohl(vp->vp_integer);
break;
case PW_TYPE_IPADDR:
if (alen != 4) goto raw;
memcpy(&vp->vp_ipaddr, p , 4);
vp->length = 4;
break;
case PW_TYPE_STRING:
if (alen > 253) return -1;
memcpy(vp->vp_strvalue, p , alen);
vp->vp_strvalue[alen] = '\0';
break;
raw:
vp->type = PW_TYPE_OCTETS;
case PW_TYPE_OCTETS:
if (alen > 253) return -1;
memcpy(vp->vp_octets, p, alen);
break;
case PW_TYPE_TLV:
return decode_tlv(vp, p, alen);
default:
fr_strerror_printf("Internal sanity check %d %d", vp->type, __LINE__);
break;
} /* switch over type */
vp->length = alen;
return 0;
}
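/*
 *	Decode a received DHCP packet into a list of VALUE_PAIRs: first the
 *	14 fixed BOOTP header fields (opcode through boot filename), then
 *	the variable options that follow the 4-byte magic cookie, i.e. from
 *	offset 240 onwards.
 */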
int fr_dhcp_decode(RADIUS_PACKET *packet)
{
int i;
uint8_t *p, *next;
uint32_t giaddr;
VALUE_PAIR *head, *vp, **tail;
VALUE_PAIR *maxms, *mtu;
char buffer[2048];
head = NULL;
tail = &head;
p = packet->data;
if ((fr_debug_flag > 2) && fr_log_fp) {
for (i = 0; i < packet->data_len; i++) {
			if ((i & 0x0f) == 0x00) fprintf(fr_log_fp, "%d: ", i);
fprintf(fr_log_fp, "%02x ", packet->data[i]);
if ((i & 0x0f) == 0x0f) fprintf(fr_log_fp, "\n");
}
fprintf(fr_log_fp, "\n");
}
if (packet->data[1] != 1) {
fr_strerror_printf("Packet is not Ethernet: %u",
packet->data[1]);
return -1;
}
/*
* Decode the header.
*/
for (i = 0; i < 14; i++) {
vp = pairmake(dhcp_header_names[i], NULL, T_OP_EQ);
if (!vp) {
fr_strerror_printf("Parse error %s", fr_strerror());
pairfree(&head);
return -1;
}
if ((i == 11) &&
(packet->data[1] == 1) &&
(packet->data[2] == 6)) {
vp->type = PW_TYPE_ETHERNET;
}
switch (vp->type) {
case PW_TYPE_BYTE:
vp->vp_integer = p[0];
vp->length = 1;
break;
case PW_TYPE_SHORT:
vp->vp_integer = (p[0] << 8) | p[1];
vp->length = 2;
break;
case PW_TYPE_INTEGER:
memcpy(&vp->vp_integer, p, 4);
vp->vp_integer = ntohl(vp->vp_integer);
vp->length = 4;
break;
case PW_TYPE_IPADDR:
memcpy(&vp->vp_ipaddr, p, 4);
vp->length = 4;
break;
case PW_TYPE_STRING:
memcpy(vp->vp_strvalue, p, dhcp_header_sizes[i]);
vp->vp_strvalue[dhcp_header_sizes[i]] = '\0';
vp->length = strlen(vp->vp_strvalue);
if (vp->length == 0) {
pairfree(&vp);
}
break;
case PW_TYPE_OCTETS:
memcpy(vp->vp_octets, p, packet->data[2]);
vp->length = packet->data[2];
break;
case PW_TYPE_ETHERNET:
memcpy(vp->vp_ether, p, sizeof(vp->vp_ether));
vp->length = sizeof(vp->vp_ether);
break;
default:
fr_strerror_printf("BAD TYPE %d", vp->type);
pairfree(&vp);
break;
}
p += dhcp_header_sizes[i];
if (!vp) continue;
if (fr_debug_flag > 1) {
vp_prints(buffer, sizeof(buffer), vp);
fr_strerror_printf("\t%s", buffer);
}
*tail = vp;
tail = &vp->next;
}
/*
* Loop over the options.
*/
next = packet->data + 240;
/*
* FIXME: This should also check sname && file fields.
* See the dhcp_get_option() function above.
*/
while (next < (packet->data + packet->data_len)) {
int num_entries, alen;
DICT_ATTR *da;
p = next;
if (*p == 0) break;
if (*p == 255) break; /* end of options signifier */
if ((p + 2) > (packet->data + packet->data_len)) break;
next = p + 2 + p[1];
if (p[1] >= 253) {
fr_strerror_printf("Attribute too long %u %u",
p[0], p[1]);
continue;
}
da = dict_attrbyvalue(DHCP2ATTR(p[0]));
if (!da) {
fr_strerror_printf("Attribute not in our dictionary: %u",
p[0]);
continue;
}
vp = NULL;
num_entries = 1;
alen = p[1];
p += 2;
/*
* Could be an array of bytes, integers, etc.
*/
if (da->flags.array) {
switch (da->type) {
case PW_TYPE_BYTE:
num_entries = alen;
alen = 1;
break;
case PW_TYPE_SHORT: /* ignore any trailing data */
num_entries = alen >> 1;
alen = 2;
break;
case PW_TYPE_IPADDR:
case PW_TYPE_INTEGER:
case PW_TYPE_DATE: /* ignore any trailing data */
num_entries = alen >> 2;
alen = 4;
break;
default:
break; /* really an internal sanity failure */
}
}
/*
* Loop over all of the entries, building VPs
*/
for (i = 0; i < num_entries; i++) {
vp = pairmake(da->name, NULL, T_OP_EQ);
if (!vp) {
fr_strerror_printf("Cannot build attribute %s",
fr_strerror());
pairfree(&head);
return -1;
}
/*
* Hack for ease of use.
*/
if ((da->attr == DHCP2ATTR(0x3d)) &&
!da->flags.array &&
(alen == 7) && (*p == 1) && (num_entries == 1)) {
vp->type = PW_TYPE_ETHERNET;
memcpy(vp->vp_octets, p + 1, 6);
vp->length = alen;
} else if (fr_dhcp_attr2vp(vp, p, alen) < 0) {
pairfree(&vp);
pairfree(&head);
return -1;
}
if (fr_debug_flag > 1) {
vp_prints(buffer, sizeof(buffer), vp);
fr_strerror_printf("\t%s", buffer);
}
*tail = vp;
while (*tail) tail = &(*tail)->next;
p += alen;
} /* loop over array entries */
} /* loop over the entire packet */
/*
* If DHCP request, set ciaddr to zero.
*/
/*
* Set broadcast flag for broken vendors, but only if
* giaddr isn't set.
*/
memcpy(&giaddr, packet->data + 24, sizeof(giaddr));
if (giaddr == htonl(INADDR_ANY)) {
/*
* DHCP Opcode is request
*/
vp = pairfind(head, DHCP2ATTR(256));
if (vp && vp->lvalue == 3) {
/*
* Vendor is "MSFT 98"
*/
vp = pairfind(head, DHCP2ATTR(63));
if (vp && (strcmp(vp->vp_strvalue, "MSFT 98") == 0)) {
vp = pairfind(head, DHCP2ATTR(262));
/*
* Reply should be broadcast.
*/
if (vp) vp->lvalue |= 0x8000;
packet->data[10] |= 0x80;
}
}
}
/*
* FIXME: Nuke attributes that aren't used in the normal
* header for discover/requests.
*/
packet->vps = head;
/*
* Client can request a LARGER size, but not a smaller
* one. They also cannot request a size larger than MTU.
*/
maxms = pairfind(packet->vps, DHCP2ATTR(57));
mtu = pairfind(packet->vps, DHCP2ATTR(26));
if (mtu && (mtu->vp_integer < DEFAULT_PACKET_SIZE)) {
fr_strerror_printf("DHCP Fatal: Client says MTU is smaller than minimum permitted by the specification.");
return -1;
}
if (maxms && (maxms->vp_integer < DEFAULT_PACKET_SIZE)) {
fr_strerror_printf("DHCP WARNING: Client says maximum message size is smaller than minimum permitted by the specification: fixing it");
maxms->vp_integer = DEFAULT_PACKET_SIZE;
}
if (maxms && mtu && (maxms->vp_integer > mtu->vp_integer)) {
fr_strerror_printf("DHCP WARNING: Client says MTU is smaller than maximum message size: fixing it");
maxms->vp_integer = mtu->vp_integer;
}
if (fr_debug_flag) fflush(stdout);
return 0;
}
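/*
 *	qsort comparator for the encoder below: DHCP-Message-Type (53)
 *	sorts first, Relay-Agent-Information (82) sorts last, and the rest
 *	are ordered by option number so that equal options end up adjacent
 *	and can be packed into a single option on the wire.
 */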
static int attr_cmp(const void *one, const void *two)
{
const VALUE_PAIR * const *a = one;
const VALUE_PAIR * const *b = two;
/*
* DHCP-Message-Type is first, for simplicity.
*/
if (((*a)->attribute == DHCP2ATTR(53)) &&
(*b)->attribute != DHCP2ATTR(53)) return -1;
/*
* Relay-Agent is last
*/
if (((*a)->attribute == DHCP2ATTR(82)) &&
(*b)->attribute != DHCP2ATTR(82)) return +1;
return ((*a)->attribute - (*b)->attribute);
}
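/*
 *	Pack a single VALUE_PAIR's value (not the two-byte option header)
 *	into "p" and return the number of bytes written: e.g. a
 *	PW_TYPE_SHORT of 0x0244 is written in network order as 02 44.
 */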
static size_t fr_dhcp_vp2attr(VALUE_PAIR *vp, uint8_t *p, size_t room)
{
size_t length;
uint32_t lvalue;
/*
* FIXME: Check room!
*/
room = room; /* -Wunused */
/*
* Search for all attributes of the same
* type, and pack them into the same
* attribute.
*/
switch (vp->type) {
case PW_TYPE_BYTE:
length = 1;
*p = vp->vp_integer & 0xff;
break;
case PW_TYPE_SHORT:
length = 2;
p[0] = (vp->vp_integer >> 8) & 0xff;
p[1] = vp->vp_integer & 0xff;
break;
case PW_TYPE_INTEGER:
length = 4;
lvalue = htonl(vp->vp_integer);
memcpy(p, &lvalue, 4);
break;
case PW_TYPE_IPADDR:
length = 4;
memcpy(p, &vp->vp_ipaddr, 4);
break;
case PW_TYPE_ETHERNET:
length = 6;
memcpy(p, &vp->vp_ether, 6);
break;
case PW_TYPE_STRING:
memcpy(p, vp->vp_strvalue, vp->length);
length = vp->length;
break;
case PW_TYPE_TLV: /* FIXME: split it on 255? */
memcpy(p, vp->vp_tlv, vp->length);
length = vp->length;
break;
case PW_TYPE_OCTETS:
memcpy(p, vp->vp_octets, vp->length);
length = vp->length;
break;
default:
fr_strerror_printf("BAD TYPE2 %d", vp->type);
length = 0;
break;
}
return length;
}
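/*
 *	Gather a run of consecutive TLV attributes sharing the same base
 *	option into one PW_TYPE_TLV pair, encoding each as a
 *	{ sub-code, length, value } record; the sub-code comes from bits
 *	8..15 of the attribute number.  Consumed pairs are marked
 *	flags.encoded so the encoder skips them later.
 */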
static VALUE_PAIR *fr_dhcp_vp2suboption(VALUE_PAIR *vps)
{
int length;
unsigned int attribute;
uint8_t *ptr;
VALUE_PAIR *vp, *tlv;
attribute = vps->attribute & 0xffff00ff;
tlv = paircreate(attribute, PW_TYPE_TLV);
if (!tlv) return NULL;
tlv->length = 0;
for (vp = vps; vp != NULL; vp = vp->next) {
/*
* Group the attributes ONLY until we see a
* non-TLV attribute.
*/
if (!vp->flags.is_tlv ||
vp->flags.encoded ||
((vp->attribute & 0xffff00ff) != attribute)) {
break;
}
tlv->length += vp->length + 2;
}
if (!tlv->length) {
pairfree(&tlv);
return NULL;
}
tlv->vp_tlv = malloc(tlv->length);
if (!tlv->vp_tlv) {
pairfree(&tlv);
return NULL;
}
ptr = tlv->vp_tlv;
for (vp = vps; vp != NULL; vp = vp->next) {
if (!vp->flags.is_tlv ||
vp->flags.encoded ||
((vp->attribute & 0xffff00ff) != attribute)) {
break;
}
length = fr_dhcp_vp2attr(vp, ptr + 2,
tlv->vp_tlv + tlv->length - ptr);
if (length > 255) return NULL;
/*
* Pack the attribute.
*/
ptr[0] = (vp->attribute & 0xff00) >> 8;
ptr[1] = length;
ptr += length + 2;
vp->flags.encoded = 1;
}
return tlv;
}
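/*
 *	Encode a DHCP reply: build the fixed BOOTP header (using the
 *	original request as a template when one is given), write the magic
 *	cookie, emit DHCP-Message-Type first, then the remaining options
 *	sorted by attr_cmp, and terminate the option list with option 255.
 */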
int fr_dhcp_encode(RADIUS_PACKET *packet, RADIUS_PACKET *original)
{
int i, num_vps;
uint8_t *p;
VALUE_PAIR *vp;
uint32_t lvalue, mms;
size_t dhcp_size, length;
dhcp_packet_t *dhcp;
char buffer[1024];
if (packet->data) return 0;
packet->data = malloc(MAX_PACKET_SIZE);
if (!packet->data) return -1;
packet->data_len = MAX_PACKET_SIZE;
if (packet->code == 0) packet->code = PW_DHCP_NAK;
/*
* If there's a request, use it as a template.
* Otherwise, assume that the caller has set up
* everything appropriately.
*/
if (original) {
packet->dst_ipaddr.af = AF_INET;
packet->src_ipaddr.af = AF_INET;
packet->dst_port = original->src_port;
packet->src_port = original->dst_port;
/*
* Note that for DHCP, we NEVER send the response
* to the source IP address of the request. It
* may have traversed multiple relays, and we
* need to send the request to the relay closest
* to the client.
*
* if giaddr, send to giaddr.
* if NAK, send broadcast packet
* if ciaddr, unicast to ciaddr
* if flags & 0x8000, broadcast (client request)
* if sent from 0.0.0.0, broadcast response
* unicast to client yiaddr
*/
/*
* FIXME: alignment issues. We likely don't want to
* de-reference the packet structure directly..
*/
dhcp = (dhcp_packet_t *) original->data;
if (dhcp->giaddr != htonl(INADDR_ANY)) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = dhcp->giaddr;
if (dhcp->giaddr != htonl(INADDR_LOOPBACK)) {
packet->dst_port = original->dst_port;
} else {
packet->dst_port = original->src_port; /* debugging */
}
} else if (packet->code == PW_DHCP_NAK) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = htonl(INADDR_BROADCAST);
} else if (dhcp->ciaddr != htonl(INADDR_ANY)) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = dhcp->ciaddr;
} else if ((dhcp->flags & 0x8000) != 0) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = htonl(INADDR_BROADCAST);
} else if (packet->dst_ipaddr.ipaddr.ip4addr.s_addr == htonl(INADDR_ANY)) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = htonl(INADDR_BROADCAST);
} else if (dhcp->yiaddr != htonl(INADDR_ANY)) {
packet->dst_ipaddr.ipaddr.ip4addr.s_addr = dhcp->yiaddr;
} else {
/* leave destination IP alone. */
}
/*
* Rewrite the source IP to be our own, if we know it.
*/
if (packet->src_ipaddr.ipaddr.ip4addr.s_addr == htonl(INADDR_BROADCAST)) {
packet->src_ipaddr.ipaddr.ip4addr.s_addr = htonl(INADDR_ANY);
}
} else {
memset(packet->data, 0, packet->data_len);
}
if (fr_debug_flag > 1) {
char type_buf[64];
const char *name = type_buf;
char src_ip_buf[256], dst_ip_buf[256];
if ((packet->code >= PW_DHCP_DISCOVER) &&
(packet->code <= PW_DHCP_INFORM)) {
name = dhcp_message_types[packet->code - PW_DHCP_OFFSET];
} else {
snprintf(type_buf, sizeof(type_buf), "%d",
packet->code - PW_DHCP_OFFSET);
}
DEBUG("Sending %s of id %08x from %s:%d to %s:%d",
name, (unsigned int) packet->id,
inet_ntop(packet->src_ipaddr.af,
&packet->src_ipaddr.ipaddr,
src_ip_buf, sizeof(src_ip_buf)),
packet->src_port,
inet_ntop(packet->dst_ipaddr.af,
&packet->dst_ipaddr.ipaddr,
dst_ip_buf, sizeof(dst_ip_buf)),
packet->dst_port);
}
p = packet->data;
mms = DEFAULT_PACKET_SIZE; /* maximum message size */
if (original) {
/*
* Clients can request a LARGER size, but not a
* smaller one. They also cannot request a size
* larger than MTU.
*/
vp = pairfind(original->vps, DHCP2ATTR(57));
if (vp && (vp->vp_integer > mms)) {
mms = vp->vp_integer;
if (mms > MAX_PACKET_SIZE) mms = MAX_PACKET_SIZE;
}
}
/*
* RFC 3118: Authentication option.
*/
vp = pairfind(packet->vps, DHCP2ATTR(90));
if (vp) {
if (vp->length < 2) {
memset(vp->vp_octets + vp->length, 0,
2 - vp->length);
vp->length = 2;
}
if (vp->length < 3) {
struct timeval tv;
gettimeofday(&tv, NULL);
vp->vp_octets[2] = 0;
timeval2ntp(&tv, vp->vp_octets + 3);
vp->length = 3 + 8;
}
/*
* Configuration token (clear-text token)
*/
if (vp->vp_octets[0] == 0) {
VALUE_PAIR *pass;
vp->vp_octets[1] = 0;
pass = pairfind(packet->vps, PW_CLEARTEXT_PASSWORD);
if (pass) {
length = pass->length;
if ((length + 11) > sizeof(vp->vp_octets)) {
length -= ((length + 11) - sizeof(vp->vp_octets));
}
memcpy(vp->vp_octets + 11, pass->vp_strvalue,
length);
vp->length = length + 11;
} else {
vp->length = 11 + 8;
memset(vp->vp_octets + 11, 0, 8);
vp->length = 11 + 8;
}
} else { /* we don't support this type! */
fr_strerror_printf("DHCP-Authentication %d unsupported",
vp->vp_octets[0]);
}
}
if (!original) {
*p++ = 1; /* client message */
} else {
*p++ = 2; /* server message */
}
*p++ = 1; /* hardware type = ethernet */
*p++ = 6; /* 6 bytes of ethernet */
*p++ = 0; /* hops */
if (original) { /* Xid */
memcpy(p, original->data + 4, 4);
} else {
lvalue = fr_rand();
memcpy(p, &lvalue, 4);
}
p += 4;
memset(p, 0, 2); /* secs are zero */
p += 2;
if (original) {
memcpy(p, original->data + 10, 6); /* copy flags && ciaddr */
}
/*
* Allow the admin to set the broadcast flag.
*/
vp = pairfind(packet->vps, DHCP2ATTR(262));
if (vp) {
p[0] |= (vp->vp_integer & 0xff00) >> 8;
p[1] |= (vp->vp_integer & 0xff);
}
p += 6;
/*
* Set client IP address.
*/
vp = pairfind(packet->vps, DHCP2ATTR(264)); /* Your IP address */
if (vp) {
lvalue = vp->vp_ipaddr;
} else {
lvalue = htonl(INADDR_ANY);
}
memcpy(p, &lvalue, 4); /* your IP address */
p += 4;
vp = pairfind(packet->vps, DHCP2ATTR(265)); /* server IP address */
if (!vp) vp = pairfind(packet->vps, DHCP2ATTR(54)); /* identifier */
if (vp) {
lvalue = vp->vp_ipaddr;
} else {
lvalue = htonl(INADDR_ANY);
}
memcpy(p, &lvalue, 4); /* Server IP address */
p += 4;
if (original) {
memcpy(p, original->data + 24, 4); /* copy gateway IP address */
} else {
vp = pairfind(packet->vps, DHCP2ATTR(266));
if (vp) {
lvalue = vp->vp_ipaddr;
} else {
lvalue = htonl(INADDR_NONE);
}
memcpy(p, &lvalue, 4);
}
p += 4;
if (original) {
memcpy(p, original->data + 28, DHCP_CHADDR_LEN);
} else {
vp = pairfind(packet->vps, DHCP2ATTR(267));
if (vp) {
if (vp->length > DHCP_CHADDR_LEN) {
memcpy(p, vp->vp_octets, DHCP_CHADDR_LEN);
} else {
memcpy(p, vp->vp_octets, vp->length);
}
}
}
p += DHCP_CHADDR_LEN;
memset(p, 0, 192); /* bootp legacy */
p += 192;
lvalue = htonl(DHCP_OPTION_MAGIC_NUMBER); /* DHCP magic number */
memcpy(p, &lvalue, 4);
p += 4;
/*
* Print the header.
*/
if (fr_debug_flag > 1) {
uint8_t *pp = p;
p = packet->data;
for (i = 0; i < 14; i++) {
vp = pairmake(dhcp_header_names[i], NULL, T_OP_EQ);
if (!vp) {
fr_strerror_printf("Parse error %s", fr_strerror());
return -1;
}
switch (vp->type) {
case PW_TYPE_BYTE:
vp->vp_integer = p[0];
vp->length = 1;
break;
case PW_TYPE_SHORT:
vp->vp_integer = (p[0] << 8) | p[1];
vp->length = 2;
break;
case PW_TYPE_INTEGER:
memcpy(&vp->vp_integer, p, 4);
vp->vp_integer = ntohl(vp->vp_integer);
vp->length = 4;
break;
case PW_TYPE_IPADDR:
memcpy(&vp->vp_ipaddr, p, 4);
vp->length = 4;
break;
case PW_TYPE_STRING:
memcpy(vp->vp_strvalue, p, dhcp_header_sizes[i]);
vp->vp_strvalue[dhcp_header_sizes[i]] = '\0';
vp->length = strlen(vp->vp_strvalue);
break;
case PW_TYPE_OCTETS: /* only for Client HW Address */
memcpy(vp->vp_octets, p, packet->data[2]);
vp->length = packet->data[2];
break;
case PW_TYPE_ETHERNET: /* only for Client HW Address */
memcpy(vp->vp_ether, p, sizeof(vp->vp_ether));
vp->length = sizeof(vp->vp_ether);
break;
default:
fr_strerror_printf("Internal sanity check failed %d %d", vp->type, __LINE__);
pairfree(&vp);
break;
}
p += dhcp_header_sizes[i];
vp_prints(buffer, sizeof(buffer), vp);
fr_strerror_printf("\t%s", buffer);
pairfree(&vp);
}
/*
* Jump over DHCP magic number, response, etc.
*/
p = pp;
}
/*
 * Before packing the attributes, re-order them so that
 * attributes of the same type are contiguous. This simplifies
 * the packing code below, which merges consecutive attributes
 * of one type into a single option.
*/
num_vps = 0;
for (vp = packet->vps; vp != NULL; vp = vp->next) {
num_vps++;
}
if (num_vps > 1) {
VALUE_PAIR **array, **last;
array = malloc(num_vps * sizeof(VALUE_PAIR *));
i = 0;
for (vp = packet->vps; vp != NULL; vp = vp->next) {
array[i++] = vp;
}
/*
* Sort the attributes.
*/
qsort(array, (size_t) num_vps, sizeof(VALUE_PAIR *),
attr_cmp);
last = &packet->vps;
for (i = 0; i < num_vps; i++) {
*last = array[i];
array[i]->next = NULL;
last = &(array[i]->next);
}
free(array);
}
p[0] = 0x35; /* DHCP-Message-Type */
p[1] = 1;
p[2] = packet->code - PW_DHCP_OFFSET;
p += 3;
/*
* Pack in the attributes.
*/
vp = packet->vps;
while (vp) {
int num_entries = 1;
VALUE_PAIR *same;
uint8_t *plength, *pattr;
if (!IS_DHCP_ATTR(vp)) goto next;
if (vp->attribute == DHCP2ATTR(53)) goto next; /* already done */
if (((vp->attribute & 0xffff) > 255) &&
(DHCP_BASE_ATTR(vp->attribute) != PW_DHCP_OPTION_82)) goto next;
length = vp->length;
for (same = vp->next; same != NULL; same = same->next) {
if (same->attribute != vp->attribute) break;
num_entries++;
}
/*
* For client-identifier
*/
if ((vp->type == PW_TYPE_ETHERNET) &&
(vp->length == 6) &&
(num_entries == 1)) {
vp->type = PW_TYPE_OCTETS;
memmove(vp->vp_octets + 1, vp->vp_octets, 6);
vp->vp_octets[0] = 1;
}
pattr = p;
*(p++) = vp->attribute & 0xff;
plength = p;
*(p++) = 0; /* header isn't included in attr length */
for (i = 0; i < num_entries; i++) {
if (fr_debug_flag > 1) {
vp_prints(buffer, sizeof(buffer), vp);
fr_strerror_printf("\t%s", buffer);
}
if (vp->flags.is_tlv) {
VALUE_PAIR *tlv = fr_dhcp_vp2suboption(vp);
if (tlv) {
tlv->next = vp->next;
vp->next = tlv;
}
/*
* The encoded flag MUST be set in the vp!
*/
vp = vp->next;
}
length = fr_dhcp_vp2attr(vp, p, 0);
/*
* This will never happen due to FreeRADIUS
* limitations: sizeof(vp->vp_octets) < 255
*/
if (length > 255) {
fr_strerror_printf("WARNING Ignoring too long attribute %s!", vp->name);
break;
}
/*
* More than one attribute of the same type
* in a row: they are packed together
* into the same TLV. If we overflow,
* go bananas!
*/
if ((*plength + length) > 255) {
fr_strerror_printf("WARNING Ignoring too long attribute %s!", vp->name);
break;
}
*plength += length;
p += length;
if (vp->next &&
(vp->next->attribute == vp->attribute))
vp = vp->next;
} /* loop over num_entries */
next:
vp = vp->next;
}
p[0] = 0xff; /* end-of-options option (255) */
p[1] = 0x00;
p += 2;
dhcp_size = p - packet->data;
/*
 * FIXME: if (dhcp_size > mms), we should spill the extra
 * options into the "file" field and then the "sname" field,
 * set the end-of-options option in the "options" field, and
 * set the "overload" option to say which fields were used.
 * Each option must be completely enclosed in the "file"
 * and/or "sname" field, followed by the end-of-options
 * option, which MUST be followed by padding.
*
* Yuck. That sucks...
*/
packet->data_len = dhcp_size;
if (original) {
/*
* FIXME: This may set it to broadcast, which we don't
* want. Instead, set it to the real address of the
* socket.
*/
packet->src_ipaddr = original->dst_ipaddr;
packet->sockfd = original->sockfd;
}
if (packet->data_len < DEFAULT_PACKET_SIZE) {
memset(packet->data + packet->data_len, 0,
DEFAULT_PACKET_SIZE - packet->data_len);
packet->data_len = DEFAULT_PACKET_SIZE;
}
if ((fr_debug_flag > 2) && fr_log_fp) {
for (i = 0; i < packet->data_len; i++) {
if ((i & 0x0f) == 0x00) fprintf(fr_log_fp, "%d: ", i);
fprintf(fr_log_fp, "%02x ", packet->data[i]);
if ((i & 0x0f) == 0x0f) fprintf(fr_log_fp, "\n");
}
fprintf(fr_log_fp, "\n");
}
return 0;
}
#endif /* WITH_DHCP */
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_4709_0 |
crossvul-cpp_data_bad_2275_0 | /*
* fs/cifs/smb2pdu.c
*
* Copyright (C) International Business Machines Corp., 2009, 2013
* Etersoft, 2012
* Author(s): Steve French (sfrench@us.ibm.com)
* Pavel Shilovsky (pshilovsky@samba.org) 2012
*
* Contains the routines for constructing the SMB2 PDUs themselves
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* SMB2 PDU handling routines here - except for leftovers (eg session setup) */
/* Note that there are handle based routines which must be */
/* treated slightly differently for reconnection purposes since we never */
/* want to reuse a stale file handle and only the caller knows the file info */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/vfs.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
#include "smb2pdu.h"
#include "cifsglob.h"
#include "cifsacl.h"
#include "cifsproto.h"
#include "smb2proto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "smb2status.h"
#include "smb2glob.h"
#include "cifspdu.h"
/*
* The following table defines the expected "StructureSize" of SMB2 requests
* in order by SMB2 command. This is similar to "wct" in SMB/CIFS requests.
*
* Note that commands are defined in smb2pdu.h in le16 but the array below is
* indexed by command in host byte order.
*/
static const int smb2_req_struct_sizes[NUMBER_OF_SMB2_COMMANDS] = {
/* SMB2_NEGOTIATE */ 36,
/* SMB2_SESSION_SETUP */ 25,
/* SMB2_LOGOFF */ 4,
/* SMB2_TREE_CONNECT */ 9,
/* SMB2_TREE_DISCONNECT */ 4,
/* SMB2_CREATE */ 57,
/* SMB2_CLOSE */ 24,
/* SMB2_FLUSH */ 24,
/* SMB2_READ */ 49,
/* SMB2_WRITE */ 49,
/* SMB2_LOCK */ 48,
/* SMB2_IOCTL */ 57,
/* SMB2_CANCEL */ 4,
/* SMB2_ECHO */ 4,
/* SMB2_QUERY_DIRECTORY */ 33,
/* SMB2_CHANGE_NOTIFY */ 32,
/* SMB2_QUERY_INFO */ 41,
/* SMB2_SET_INFO */ 33,
/* SMB2_OPLOCK_BREAK */ 24 /* BB this is 36 for LEASE_BREAK variant */
};
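/*
 * Example lookup (informational):
 * smb2_req_struct_sizes[le16_to_cpu(SMB2_READ)] yields 49, the
 * StructureSize expected in an SMB2 READ request.
 */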
static void
smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
const struct cifs_tcon *tcon)
{
struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
char *temp = (char *)hdr;
/* lookup word count ie StructureSize from table */
__u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_cmd)];
/*
* smaller than SMALL_BUFFER_SIZE but bigger than fixed area of
* largest operations (Create)
*/
memset(temp, 0, 256);
/* Note this is only network field converted to big endian */
hdr->smb2_buf_length = cpu_to_be32(parmsize + sizeof(struct smb2_hdr)
- 4 /* RFC 1001 length field itself not counted */);
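/*
 * Worked example (informational): for SMB2_LOGOFF, parmsize is 4, so
 * the initial wire length is 4 + 64 = 68 octets (the 64-octet SMB2
 * header, excluding the RFC 1001 length field itself); variable data
 * is accounted for later via inc_rfc1001_len().
 */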
hdr->ProtocolId[0] = 0xFE;
hdr->ProtocolId[1] = 'S';
hdr->ProtocolId[2] = 'M';
hdr->ProtocolId[3] = 'B';
hdr->StructureSize = cpu_to_le16(64);
hdr->Command = smb2_cmd;
hdr->CreditRequest = cpu_to_le16(2); /* BB make this dynamic */
hdr->ProcessId = cpu_to_le32((__u16)current->tgid);
if (!tcon)
goto out;
/* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
/* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
if ((tcon->ses) &&
(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
hdr->CreditCharge = cpu_to_le16(1);
/* else CreditCharge MBZ */
hdr->TreeId = tcon->tid;
/* Uid is not converted */
if (tcon->ses)
hdr->SessionId = tcon->ses->Suid;
/*
* If we would set SMB2_FLAGS_DFS_OPERATIONS on open we also would have
* to pass the path on the Open SMB prefixed by \\server\share.
* Not sure when we would need to do the augmented path (if ever) and
* setting this flag breaks the SMB2 open operation since it is
* illegal to send an empty path name (without \\server\share prefix)
* when the DFS flag is set in the SMB open header. We could
* consider setting the flag on all operations other than open
 * but it is safer to not set it for now.
*/
/* if (tcon->share_flags & SHI1005_FLAGS_DFS)
hdr->Flags |= SMB2_FLAGS_DFS_OPERATIONS; */
if (tcon->ses && tcon->ses->server && tcon->ses->server->sign)
hdr->Flags |= SMB2_FLAGS_SIGNED;
out:
pdu->StructureSize2 = cpu_to_le16(parmsize);
return;
}
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
int rc = 0;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
/*
* SMB2s NegProt, SessSetup, Logoff do not have tcon yet so
* check for tcp and smb session status done differently
* for those three - in the calling routine.
*/
if (tcon == NULL)
return rc;
if (smb2_command == SMB2_TREE_CONNECT)
return rc;
if (tcon->tidStatus == CifsExiting) {
/*
* only tree disconnect, open, and write,
* (and ulogoff which does not have tcon)
* are allowed as we start force umount.
*/
if ((smb2_command != SMB2_WRITE) &&
(smb2_command != SMB2_CREATE) &&
(smb2_command != SMB2_TREE_DISCONNECT)) {
cifs_dbg(FYI, "can not send cmd %d while umounting\n",
smb2_command);
return -ENODEV;
}
}
if ((!tcon->ses) || (tcon->ses->status == CifsExiting) ||
(!tcon->ses->server))
return -EIO;
ses = tcon->ses;
server = ses->server;
/*
* Give demultiplex thread up to 10 seconds to reconnect, should be
* greater than cifs socket timeout which is 7 seconds
*/
while (server->tcpStatus == CifsNeedReconnect) {
/*
* Return to caller for TREE_DISCONNECT and LOGOFF and CLOSE
* here since they are implicitly done when session drops.
*/
switch (smb2_command) {
/*
* BB Should we keep oplock break and add flush to exceptions?
*/
case SMB2_TREE_DISCONNECT:
case SMB2_CANCEL:
case SMB2_CLOSE:
case SMB2_OPLOCK_BREAK:
return -EAGAIN;
}
wait_event_interruptible_timeout(server->response_q,
(server->tcpStatus != CifsNeedReconnect), 10 * HZ);
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
break;
/*
* on "soft" mounts we wait once. Hard mounts keep
* retrying until process is killed or server comes
* back on-line
*/
if (!tcon->retry) {
cifs_dbg(FYI, "gave up waiting on reconnect in smb_init\n");
return -EHOSTDOWN;
}
}
if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
return rc;
nls_codepage = load_nls_default();
/*
* need to prevent multiple threads trying to simultaneously reconnect
* the same SMB session
*/
mutex_lock(&tcon->ses->session_mutex);
rc = cifs_negotiate_protocol(0, tcon->ses);
if (!rc && tcon->ses->need_reconnect)
rc = cifs_setup_session(0, tcon->ses, nls_codepage);
if (rc || !tcon->need_reconnect) {
mutex_unlock(&tcon->ses->session_mutex);
goto out;
}
cifs_mark_open_files_invalid(tcon);
rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nls_codepage);
mutex_unlock(&tcon->ses->session_mutex);
cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc);
if (rc)
goto out;
atomic_inc(&tconInfoReconnectCount);
out:
/*
* Check if handle based operation so we know whether we can continue
* or not without returning to caller to reset file handle.
*/
/*
* BB Is flush done by server on drop of tcp session? Should we special
* case it and skip above?
*/
switch (smb2_command) {
case SMB2_FLUSH:
case SMB2_READ:
case SMB2_WRITE:
case SMB2_LOCK:
case SMB2_IOCTL:
case SMB2_QUERY_DIRECTORY:
case SMB2_CHANGE_NOTIFY:
case SMB2_QUERY_INFO:
case SMB2_SET_INFO:
return -EAGAIN;
}
unload_nls(nls_codepage);
return rc;
}
/*
* Allocate and return pointer to an SMB request hdr, and set basic
* SMB information in the SMB header. If the return code is zero, this
* function must have filled in request_buf pointer.
*/
static int
small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
void **request_buf)
{
int rc = 0;
rc = smb2_reconnect(smb2_command, tcon);
if (rc)
return rc;
/* BB eventually switch this to SMB2 specific small buf size */
*request_buf = cifs_small_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
}
smb2_hdr_assemble((struct smb2_hdr *) *request_buf, smb2_command, tcon);
if (tcon != NULL) {
#ifdef CONFIG_CIFS_STATS2
uint16_t com_code = le16_to_cpu(smb2_command);
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
#endif
cifs_stats_inc(&tcon->num_smbs_sent);
}
return rc;
}
/*
*
* SMB2 Worker functions follow:
*
* The general structure of the worker functions is:
* 1) Call smb2_init (assembles SMB2 header)
* 2) Initialize SMB2 command specific fields in fixed length area of SMB
* 3) Call smb_sendrcv2 (sends request on socket and waits for response)
* 4) Decode SMB2 command specific fields in the fixed length area
* 5) Decode variable length data area (if any for this SMB2 command type)
* 6) Call free smb buffer
* 7) return
*
*/
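/*
 * Minimal sketch (informational, not compiled) of that shape, using a
 * hypothetical SMB2_frob command and request/response structs; real
 * workers differ in fields and iovec counts:
 *
 * int SMB2_frob(const unsigned int xid, struct cifs_tcon *tcon)
 * {
 *	struct smb2_frob_req *req;
 *	struct smb2_frob_rsp *rsp;
 *	struct kvec iov[1];
 *	int rc, resp_buftype;
 *
 *	rc = small_smb2_init(SMB2_FROB, tcon, (void **) &req);   step 1
 *	if (rc)
 *		return rc;
 *	req->SomeField = cpu_to_le32(1);                         step 2
 *	iov[0].iov_base = (char *)req;
 *	iov[0].iov_len = get_rfc1002_length(req) + 4;
 *	rc = SendReceive2(xid, tcon->ses, iov, 1,
 *			  &resp_buftype, 0);                     step 3
 *	rsp = (struct smb2_frob_rsp *)iov[0].iov_base;
 *	if (!rc) {
 *		(decode fixed and variable response areas)       steps 4-5
 *	}
 *	free_rsp_buf(resp_buftype, rsp);                         step 6
 *	return rc;                                               step 7
 * }
 */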
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
struct smb2_negotiate_req *req;
struct smb2_negotiate_rsp *rsp;
struct kvec iov[1];
int rc = 0;
int resp_buftype;
struct TCP_Server_Info *server = ses->server;
int blob_offset, blob_length;
char *security_blob;
int flags = CIFS_NEG_OP;
cifs_dbg(FYI, "Negotiate protocol\n");
if (!server) {
WARN(1, "%s: server is NULL!\n", __func__);
return -EIO;
}
rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req);
if (rc)
return rc;
req->hdr.SessionId = 0;
req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id);
req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */
inc_rfc1001_len(req, 2);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (ses->sign)
req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
else if (global_secflags & CIFSSEC_MAY_SIGN)
req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
else
req->SecurityMode = 0;
req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities);
/* ClientGUID must be zero for SMB2.02 dialect */
if (ses->server->vals->protocol_id == SMB20_PROT_ID)
memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE);
else
memcpy(req->ClientGUID, server->client_guid,
SMB2_CLIENT_GUID_SIZE);
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, flags);
rsp = (struct smb2_negotiate_rsp *)iov[0].iov_base;
/*
* No tcon so can't do
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc != 0)
goto neg_exit;
cifs_dbg(FYI, "mode 0x%x\n", rsp->SecurityMode);
/* BB we may eventually want to match the negotiated vs. requested
dialect, even though we are only requesting one at a time */
if (rsp->DialectRevision == cpu_to_le16(SMB20_PROT_ID))
cifs_dbg(FYI, "negotiated smb2.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB21_PROT_ID))
cifs_dbg(FYI, "negotiated smb2.1 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB30_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.0 dialect\n");
else if (rsp->DialectRevision == cpu_to_le16(SMB302_PROT_ID))
cifs_dbg(FYI, "negotiated smb3.02 dialect\n");
else {
cifs_dbg(VFS, "Illegal dialect returned by server %d\n",
le16_to_cpu(rsp->DialectRevision));
rc = -EIO;
goto neg_exit;
}
server->dialect = le16_to_cpu(rsp->DialectRevision);
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
/* set it to the maximum buffer size value we can send with 1 credit */
server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
SMB2_MAX_BUFFER_SIZE);
server->max_read = le32_to_cpu(rsp->MaxReadSize);
server->max_write = le32_to_cpu(rsp->MaxWriteSize);
/* BB Do we need to validate the SecurityMode? */
server->sec_mode = le16_to_cpu(rsp->SecurityMode);
server->capabilities = le32_to_cpu(rsp->Capabilities);
/* Internal types */
server->capabilities |= SMB2_NT_FIND | SMB2_LARGE_FILES;
security_blob = smb2_get_data_area_len(&blob_offset, &blob_length,
&rsp->hdr);
/*
* See MS-SMB2 section 2.2.4: if no blob, client picks default which
* for us will be
* ses->sectype = RawNTLMSSP;
* but for time being this is our only auth choice so doesn't matter.
* We just found a server which sets blob length to zero expecting raw.
*/
if (blob_length == 0)
cifs_dbg(FYI, "missing security blob on negprot\n");
rc = cifs_enable_signing(server, ses->sign);
#ifdef CONFIG_SMB2_ASN1 /* BB REMOVEME when updated asn1.c ready */
if (rc)
goto neg_exit;
if (blob_length)
rc = decode_neg_token_init(security_blob, blob_length,
&server->sec_type);
if (rc == 1)
rc = 0;
else if (rc == 0) {
rc = -EIO;
goto neg_exit;
}
#endif
neg_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc = 0;
struct validate_negotiate_info_req vneg_inbuf;
struct validate_negotiate_info_rsp *pneg_rsp;
u32 rsplen;
cifs_dbg(FYI, "validate negotiate\n");
/*
* validation ioctl must be signed, so no point sending this if we
* can not sign it. We could eventually change this to selectively
* sign just this, the first and only signed request on a connection.
* This is good enough for now since a user who wants better security
* would also enable signing on the mount. Having validation of
* negotiate info for signed connections helps reduce attack vectors
*/
if (tcon->ses->server->sign == false)
return 0; /* validation requires signing */
vneg_inbuf.Capabilities =
cpu_to_le32(tcon->ses->server->vals->req_capabilities);
memcpy(vneg_inbuf.Guid, tcon->ses->server->client_guid,
SMB2_CLIENT_GUID_SIZE);
if (tcon->ses->sign)
vneg_inbuf.SecurityMode =
cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED);
else if (global_secflags & CIFSSEC_MAY_SIGN)
vneg_inbuf.SecurityMode =
cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED);
else
vneg_inbuf.SecurityMode = 0;
vneg_inbuf.DialectCount = cpu_to_le16(1);
vneg_inbuf.Dialects[0] =
cpu_to_le16(tcon->ses->server->vals->protocol_id);
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
(char *)&vneg_inbuf, sizeof(struct validate_negotiate_info_req),
(char **)&pneg_rsp, &rsplen);
if (rc != 0) {
cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
return -EIO;
}
if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
return -EIO;
}
/* check validate negotiate info response matches what we got earlier */
if (pneg_rsp->Dialect !=
cpu_to_le16(tcon->ses->server->vals->protocol_id))
goto vneg_out;
if (pneg_rsp->SecurityMode != cpu_to_le16(tcon->ses->server->sec_mode))
goto vneg_out;
/* do not validate server guid because not saved at negprot time yet */
if ((le32_to_cpu(pneg_rsp->Capabilities) | SMB2_NT_FIND |
SMB2_LARGE_FILES) != tcon->ses->server->capabilities)
goto vneg_out;
/* validate negotiate successful */
cifs_dbg(FYI, "validate negotiate info successful\n");
return 0;
vneg_out:
cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n");
return -EIO;
}
int
SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
struct smb2_sess_setup_req *req;
struct smb2_sess_setup_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
__le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */
struct TCP_Server_Info *server = ses->server;
u16 blob_length = 0;
char *security_blob;
char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
cifs_dbg(FYI, "Session Setup\n");
if (!server) {
WARN(1, "%s: server is NULL!\n", __func__);
return -EIO;
}
/*
* If we are here due to reconnect, free per-smb session key
* in case signing was required.
*/
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
/*
* If memory allocation is successful, caller of this function
* frees it.
*/
ses->ntlmssp = kmalloc(sizeof(struct ntlmssp_auth), GFP_KERNEL);
if (!ses->ntlmssp)
return -ENOMEM;
ses->ntlmssp->sesskey_per_smbsess = true;
/* FIXME: allow for other auth types besides NTLMSSP (e.g. krb5) */
ses->sectype = RawNTLMSSP;
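/*
 * Informational: the raw NTLMSSP exchange below is a two-round state
 * machine driven by "phase":
 *
 * NtLmNegotiate - send the NEGOTIATE blob; the server answers
 * STATUS_MORE_PROCESSING_REQUIRED with a challenge.
 * NtLmChallenge - challenge decoded; loop back to the label below,
 * which promotes the phase to NtLmAuthenticate.
 * NtLmAuthenticate - send the AUTHENTICATE blob; on success the
 * session is established and signing keys may be derived.
 */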
ssetup_ntlmssp_authenticate:
if (phase == NtLmChallenge)
phase = NtLmAuthenticate; /* if ntlmssp, now final phase */
rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req);
if (rc)
return rc;
req->hdr.SessionId = 0; /* First session, not a reauthenticate */
req->VcNumber = 0; /* MBZ */
/* to enable echos and oplocks */
req->hdr.CreditRequest = cpu_to_le16(3);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED;
else if (global_secflags & CIFSSEC_MAY_SIGN) /* one flag unlike MUST_ */
req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED;
else
req->SecurityMode = 0;
req->Capabilities = 0;
req->Channel = 0; /* MBZ */
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
if (phase == NtLmNegotiate) {
ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
GFP_KERNEL);
if (ntlmssp_blob == NULL) {
rc = -ENOMEM;
goto ssetup_exit;
}
build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
if (use_spnego) {
/* blob_length = build_spnego_ntlmssp_blob(
&security_blob,
sizeof(struct _NEGOTIATE_MESSAGE),
ntlmssp_blob); */
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
kfree(ntlmssp_blob);
goto ssetup_exit;
} else {
blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
/* with raw NTLMSSP we don't encapsulate in SPNEGO */
security_blob = ntlmssp_blob;
}
} else if (phase == NtLmAuthenticate) {
req->hdr.SessionId = ses->Suid;
ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
GFP_KERNEL);
if (ntlmssp_blob == NULL) {
rc = -ENOMEM;
goto ssetup_exit;
}
rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
nls_cp);
if (rc) {
cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
rc);
goto ssetup_exit; /* BB double check error handling */
}
if (use_spnego) {
/* blob_length = build_spnego_ntlmssp_blob(
&security_blob,
blob_length,
ntlmssp_blob); */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
kfree(ntlmssp_blob);
goto ssetup_exit;
} else {
security_blob = ntlmssp_blob;
}
} else {
cifs_dbg(VFS, "illegal ntlmssp phase\n");
rc = -EIO;
goto ssetup_exit;
}
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->SecurityBufferOffset =
cpu_to_le16(sizeof(struct smb2_sess_setup_req) -
1 /* pad */ - 4 /* rfc1001 len */);
req->SecurityBufferLength = cpu_to_le16(blob_length);
iov[1].iov_base = security_blob;
iov[1].iov_len = blob_length;
inc_rfc1001_len(req, blob_length - 1 /* pad */);
/* BB add code to build os and lm fields */
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype,
CIFS_LOG_ERROR | CIFS_NEG_OP);
kfree(security_blob);
rsp = (struct smb2_sess_setup_rsp *)iov[0].iov_base;
if (resp_buftype != CIFS_NO_BUFFER &&
rsp->hdr.Status == STATUS_MORE_PROCESSING_REQUIRED) {
if (phase != NtLmNegotiate) {
cifs_dbg(VFS, "Unexpected more processing error\n");
goto ssetup_exit;
}
if (offsetof(struct smb2_sess_setup_rsp, Buffer) - 4 !=
le16_to_cpu(rsp->SecurityBufferOffset)) {
cifs_dbg(VFS, "Invalid security buffer offset %d\n",
le16_to_cpu(rsp->SecurityBufferOffset));
rc = -EIO;
goto ssetup_exit;
}
/* NTLMSSP Negotiate sent now processing challenge (response) */
phase = NtLmChallenge; /* process ntlmssp challenge */
rc = 0; /* MORE_PROCESSING is not an error here but expected */
ses->Suid = rsp->hdr.SessionId;
rc = decode_ntlmssp_challenge(rsp->Buffer,
le16_to_cpu(rsp->SecurityBufferLength), ses);
}
/*
* BB eventually add code for SPNEGO decoding of NtlmChallenge blob,
* but at least the raw NTLMSSP case works.
*/
/*
* No tcon so can't do
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
if (rc != 0)
goto ssetup_exit;
ses->session_flags = le16_to_cpu(rsp->SessionFlags);
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
cifs_dbg(VFS, "SMB3 encryption not supported yet\n");
ssetup_exit:
free_rsp_buf(resp_buftype, rsp);
/* if ntlmssp, and negotiate succeeded, proceed to authenticate phase */
if ((phase == NtLmChallenge) && (rc == 0))
goto ssetup_ntlmssp_authenticate;
if (!rc) {
mutex_lock(&server->srv_mutex);
if (server->sign && server->ops->generate_signingkey) {
rc = server->ops->generate_signingkey(ses);
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
if (rc) {
cifs_dbg(FYI,
"SMB3 session key generation failed\n");
mutex_unlock(&server->srv_mutex);
goto keygen_exit;
}
}
if (!server->session_estab) {
server->sequence_number = 0x2;
server->session_estab = true;
}
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "SMB2/3 session established successfully\n");
spin_lock(&GlobalMid_Lock);
ses->status = CifsGood;
ses->need_reconnect = false;
spin_unlock(&GlobalMid_Lock);
}
keygen_exit:
if (!server->sign) {
kfree(ses->auth_key.response);
ses->auth_key.response = NULL;
}
kfree(ses->ntlmssp);
return rc;
}
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
struct smb2_logoff_req *req; /* response is also trivial struct */
int rc = 0;
struct TCP_Server_Info *server;
cifs_dbg(FYI, "disconnect session %p\n", ses);
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
/* no need to send SMB logoff if uid already closed due to reconnect */
if (ses->need_reconnect)
goto smb2_session_already_dead;
rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req);
if (rc)
return rc;
/* since no tcon, smb2_init can not do this, so do here */
req->hdr.SessionId = ses->Suid;
if (server->sign)
req->hdr.Flags |= SMB2_FLAGS_SIGNED;
rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0);
/*
* No tcon so can't do
* cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_fail[SMB2...]);
*/
smb2_session_already_dead:
return rc;
}
static inline void cifs_stats_fail_inc(struct cifs_tcon *tcon, uint16_t code)
{
cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_failed[code]);
}
#define MAX_SHARENAME_LENGTH (255 /* server */ + 80 /* share */ + 1 /* NULL */)
/* These are similar values to what Windows uses */
static inline void init_copy_chunk_defaults(struct cifs_tcon *tcon)
{
tcon->max_chunks = 256;
tcon->max_bytes_chunk = 1048576;
tcon->max_bytes_copy = 16777216;
}
int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
int unc_path_len;
struct TCP_Server_Info *server;
__le16 *unc_path = NULL;
cifs_dbg(FYI, "TCON\n");
if ((ses->server) && tree)
server = ses->server;
else
return -EIO;
if (tcon && tcon->bad_network_name)
return -ENOENT;
unc_path = kmalloc(MAX_SHARENAME_LENGTH * 2, GFP_KERNEL);
if (unc_path == NULL)
return -ENOMEM;
unc_path_len = cifs_strtoUTF16(unc_path, tree, strlen(tree), cp) + 1;
unc_path_len *= 2;
if (unc_path_len < 2) {
kfree(unc_path);
return -EINVAL;
}
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
return rc;
}
if (tcon == NULL) {
/* since no tcon, smb2_init can not do this, so do here */
req->hdr.SessionId = ses->Suid;
/* if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED)
req->hdr.Flags |= SMB2_FLAGS_SIGNED; */
}
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for pad */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
/* Testing shows that buffer offset must be at location of Buffer[0] */
req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req)
- 1 /* pad */ - 4 /* do not count rfc1001 len field */);
req->PathLength = cpu_to_le16(unc_path_len - 2);
iov[1].iov_base = unc_path;
iov[1].iov_len = unc_path_len;
inc_rfc1001_len(req, unc_path_len - 1 /* pad */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_tree_connect_rsp *)iov[0].iov_base;
if (rc != 0) {
if (tcon) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
}
goto tcon_error_exit;
}
if (tcon == NULL) {
ses->ipc_tid = rsp->hdr.TreeId;
goto tcon_exit;
}
if (rsp->ShareType & SMB2_SHARE_TYPE_DISK)
cifs_dbg(FYI, "connection to disk share\n");
else if (rsp->ShareType & SMB2_SHARE_TYPE_PIPE) {
tcon->ipc = true;
cifs_dbg(FYI, "connection to pipe share\n");
} else if (rsp->ShareType & SMB2_SHARE_TYPE_PRINT) {
tcon->print = true;
cifs_dbg(FYI, "connection to printer\n");
} else {
cifs_dbg(VFS, "unknown share type %d\n", rsp->ShareType);
rc = -EOPNOTSUPP;
goto tcon_error_exit;
}
tcon->share_flags = le32_to_cpu(rsp->ShareFlags);
tcon->capabilities = rsp->Capabilities; /* we keep caps little endian */
tcon->maximal_access = le32_to_cpu(rsp->MaximalAccess);
tcon->tidStatus = CifsGood;
tcon->need_reconnect = false;
tcon->tid = rsp->hdr.TreeId;
strlcpy(tcon->treeName, tree, sizeof(tcon->treeName));
if ((rsp->Capabilities & SMB2_SHARE_CAP_DFS) &&
((tcon->share_flags & SHI1005_FLAGS_DFS) == 0))
cifs_dbg(VFS, "DFS capability contradicts DFS flag\n");
init_copy_chunk_defaults(tcon);
if (tcon->ses->server->ops->validate_negotiate)
rc = tcon->ses->server->ops->validate_negotiate(xid, tcon);
tcon_exit:
free_rsp_buf(resp_buftype, rsp);
kfree(unc_path);
return rc;
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
if (tcon) /* tcon is NULL on the IPC$ connect path */
tcon->bad_network_name = true;
}
goto tcon_exit;
}
int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
struct smb2_tree_disconnect_req *req; /* response is trivial */
int rc = 0;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
cifs_dbg(FYI, "Tree Disconnect\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
if ((tcon->need_reconnect) || (tcon->ses->need_reconnect))
return 0;
rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req);
if (rc)
return rc;
rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
return rc;
}
static struct create_durable *
create_durable_buf(void)
{
struct create_durable *buf;
buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_durable, Data));
buf->ccontext.DataLength = cpu_to_le32(16);
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct create_durable, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
/* SMB2_CREATE_DURABLE_HANDLE_REQUEST is "DHnQ" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = 'n';
buf->Name[3] = 'Q';
return buf;
}
static struct create_durable *
create_reconnect_durable_buf(struct cifs_fid *fid)
{
struct create_durable *buf;
buf = kzalloc(sizeof(struct create_durable), GFP_KERNEL);
if (!buf)
return NULL;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_durable, Data));
buf->ccontext.DataLength = cpu_to_le32(16);
buf->ccontext.NameOffset = cpu_to_le16(offsetof
(struct create_durable, Name));
buf->ccontext.NameLength = cpu_to_le16(4);
buf->Data.Fid.PersistentFileId = fid->persistent_fid;
buf->Data.Fid.VolatileFileId = fid->volatile_fid;
/* SMB2_CREATE_DURABLE_HANDLE_RECONNECT is "DHnC" */
buf->Name[0] = 'D';
buf->Name[1] = 'H';
buf->Name[2] = 'n';
buf->Name[3] = 'C';
return buf;
}
static __u8
parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
unsigned int *epoch)
{
char *data_offset;
struct create_context *cc;
unsigned int next = 0;
char *name;
data_offset = (char *)rsp + 4 + le32_to_cpu(rsp->CreateContextsOffset);
cc = (struct create_context *)data_offset;
do {
cc = (struct create_context *)((char *)cc + next);
name = le16_to_cpu(cc->NameOffset) + (char *)cc;
if (le16_to_cpu(cc->NameLength) != 4 ||
strncmp(name, "RqLs", 4)) {
next = le32_to_cpu(cc->Next);
continue;
}
return server->ops->parse_lease_buf(cc, epoch);
} while (next != 0);
return 0;
}
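/*
 * Informational: parse_lease_state() above walks the create context
 * chain in the response. Each element is a struct create_context whose
 * Next field gives the byte offset of the following context (0
 * terminates the chain) and whose NameOffset/NameLength locate the
 * 4-byte tag, "RqLs" for the requested lease state parsed here.
 */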
static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int *num_iovec, __u8 *oplock)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = server->vals->create_lease_size;
req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE;
if (!req->CreateContextsOffset)
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) - 4 +
iov[num - 1].iov_len);
le32_add_cpu(&req->CreateContextsLength,
server->vals->create_lease_size);
inc_rfc1001_len(&req->hdr, server->vals->create_lease_size);
*num_iovec = num + 1;
return 0;
}
static int
add_durable_context(struct kvec *iov, unsigned int *num_iovec,
struct cifs_open_parms *oparms)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
if (oparms->reconnect) {
iov[num].iov_base = create_reconnect_durable_buf(oparms->fid);
/* indicate that we don't need to relock the file */
oparms->reconnect = false;
} else
iov[num].iov_base = create_durable_buf();
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = sizeof(struct create_durable);
if (!req->CreateContextsOffset)
req->CreateContextsOffset =
cpu_to_le32(sizeof(struct smb2_create_req) - 4 +
iov[1].iov_len);
le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable));
inc_rfc1001_len(&req->hdr, sizeof(struct create_durable));
*num_iovec = num + 1;
return 0;
}
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u8 *oplock, struct smb2_file_all_info *buf,
struct smb2_err_rsp **err_buf)
{
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
struct TCP_Server_Info *server;
struct cifs_tcon *tcon = oparms->tcon;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[4];
int resp_buftype;
int uni_path_len;
__le16 *copy_path = NULL;
int copy_size;
int rc = 0;
unsigned int num_iovecs = 2;
__u32 file_attributes = 0;
char *dhc_buf = NULL, *lc_buf = NULL;
cifs_dbg(FYI, "create/open\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req);
if (rc)
return rc;
if (oparms->create_options & CREATE_OPTION_READONLY)
file_attributes |= ATTR_READONLY;
req->ImpersonationLevel = IL_IMPERSONATION;
req->DesiredAccess = cpu_to_le32(oparms->desired_access);
/* File attributes ignored on open (used in create though) */
req->FileAttributes = cpu_to_le32(file_attributes);
req->ShareAccess = FILE_SHARE_ALL_LE;
req->CreateDisposition = cpu_to_le32(oparms->disposition);
req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK);
uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
/* do not count rfc1001 len field */
req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4);
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
/* MUST set path len (NameLength) to 0 opening root of share */
req->NameLength = cpu_to_le16(uni_path_len - 2);
/* -1 since last byte is buf[0] which is sent below (path) */
iov[0].iov_len--;
if (uni_path_len % 8 != 0) {
copy_size = uni_path_len / 8 * 8;
if (copy_size < uni_path_len)
copy_size += 8;
copy_path = kzalloc(copy_size, GFP_KERNEL);
if (!copy_path)
return -ENOMEM;
memcpy((char *)copy_path, (const char *)path,
uni_path_len);
uni_path_len = copy_size;
path = copy_path;
}
iov[1].iov_len = uni_path_len;
iov[1].iov_base = path;
/* -1 since last byte is buf[0] which was counted in smb2_buf_len */
inc_rfc1001_len(req, uni_path_len - 1);
if (!server->oplocks)
*oplock = SMB2_OPLOCK_LEVEL_NONE;
if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
*oplock == SMB2_OPLOCK_LEVEL_NONE)
req->RequestedOplockLevel = *oplock;
else {
rc = add_lease_context(server, iov, &num_iovecs, oplock);
if (rc) {
cifs_small_buf_release(req);
kfree(copy_path);
return rc;
}
lc_buf = iov[num_iovecs-1].iov_base;
}
if (*oplock == SMB2_OPLOCK_LEVEL_BATCH) {
/* need to set Next field of lease context if we request it */
if (server->capabilities & SMB2_GLOBAL_CAP_LEASING) {
struct create_context *ccontext =
(struct create_context *)iov[num_iovecs-1].iov_base;
ccontext->Next =
cpu_to_le32(server->vals->create_lease_size);
}
rc = add_durable_context(iov, &num_iovecs, oparms);
if (rc) {
cifs_small_buf_release(req);
kfree(copy_path);
kfree(lc_buf);
return rc;
}
dhc_buf = iov[num_iovecs-1].iov_base;
}
rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
rsp = (struct smb2_create_rsp *)iov[0].iov_base;
if (rc != 0) {
cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
if (err_buf)
*err_buf = kmemdup(rsp, get_rfc1002_length(rsp) + 4,
GFP_KERNEL);
goto creat_exit;
}
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
if (buf) {
memcpy(buf, &rsp->CreationTime, 32);
buf->AllocationSize = rsp->AllocationSize;
buf->EndOfFile = rsp->EndofFile;
buf->Attributes = rsp->FileAttributes;
buf->NumberOfLinks = cpu_to_le32(1);
buf->DeletePending = 0;
}
if (rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
*oplock = parse_lease_state(server, rsp, &oparms->fid->epoch);
else
*oplock = rsp->OplockLevel;
creat_exit:
kfree(copy_path);
kfree(lc_buf);
kfree(dhc_buf);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
/*
* SMB2 IOCTL is used for both IOCTLs and FSCTLs
*/
int
SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid, u32 opcode, bool is_fsctl, char *in_data,
u32 indatalen, char **out_data, u32 *plen /* returned data len */)
{
struct smb2_ioctl_req *req;
struct smb2_ioctl_rsp *rsp;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[2];
int resp_buftype;
int num_iovecs;
int rc = 0;
cifs_dbg(FYI, "SMB2 IOCTL\n");
if (out_data != NULL)
*out_data = NULL;
/* zero out returned data len, in case of error */
if (plen)
*plen = 0;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req);
if (rc)
return rc;
req->CtlCode = cpu_to_le32(opcode);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
if (indatalen) {
req->InputCount = cpu_to_le32(indatalen);
/* do not set InputOffset if no input data */
req->InputOffset =
cpu_to_le32(offsetof(struct smb2_ioctl_req, Buffer) - 4);
iov[1].iov_base = in_data;
iov[1].iov_len = indatalen;
num_iovecs = 2;
} else
num_iovecs = 1;
req->OutputOffset = 0;
req->OutputCount = 0; /* MBZ */
/*
* Could increase MaxOutputResponse, but that would require more
* than one credit. Windows typically sets this smaller, but for some
* ioctls it may be useful to allow server to send more. No point
 * limiting what the server can send as long as it fits in one credit
*/
req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
else
req->Flags = 0;
iov[0].iov_base = (char *)req;
/*
* If no input data, the size of ioctl struct in
* protocol spec still includes a 1 byte data buffer,
* but if input data passed to ioctl, we do not
* want to double count this, so we do not send
* the dummy one byte of data in iovec[0] if sending
* input data (in iovec[1]). We also must add 4 bytes
* in first iovec to allow for rfc1002 length field.
*/
if (indatalen) {
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
inc_rfc1001_len(req, indatalen - 1);
} else
iov[0].iov_len = get_rfc1002_length(req) + 4;
rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buftype, 0);
rsp = (struct smb2_ioctl_rsp *)iov[0].iov_base;
if ((rc != 0) && (rc != -EINVAL)) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
} else if (rc == -EINVAL) {
if ((opcode != FSCTL_SRV_COPYCHUNK_WRITE) &&
(opcode != FSCTL_SRV_COPYCHUNK)) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_IOCTL_HE);
goto ioctl_exit;
}
}
/* check if caller wants to look at return data or just return rc */
if ((plen == NULL) || (out_data == NULL))
goto ioctl_exit;
*plen = le32_to_cpu(rsp->OutputCount);
/* We check for obvious errors in the output buffer length and offset */
if (*plen == 0)
goto ioctl_exit; /* server returned no data */
else if (*plen > 0xFF00) {
cifs_dbg(VFS, "srv returned invalid ioctl length: %d\n", *plen);
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
if (get_rfc1002_length(rsp) < le32_to_cpu(rsp->OutputOffset) + *plen) {
cifs_dbg(VFS, "Malformed ioctl resp: len %d offset %d\n", *plen,
le32_to_cpu(rsp->OutputOffset));
*plen = 0;
rc = -EIO;
goto ioctl_exit;
}
*out_data = kmalloc(*plen, GFP_KERNEL);
if (*out_data == NULL) {
rc = -ENOMEM;
goto ioctl_exit;
}
memcpy(*out_data, rsp->hdr.ProtocolId + le32_to_cpu(rsp->OutputOffset),
*plen);
ioctl_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
/*
* Individual callers to ioctl worker function follow
*/
int
SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
int rc;
struct compress_ioctl fsctl_input;
char *ret_data = NULL;
fsctl_input.CompressionState =
__constant_cpu_to_le16(COMPRESSION_FORMAT_DEFAULT);
rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
FSCTL_SET_COMPRESSION, true /* is_fsctl */,
(char *)&fsctl_input /* data input */,
2 /* in data len */, &ret_data /* out data */, NULL);
cifs_dbg(FYI, "set compression rc %d\n", rc);
return rc;
}
int
SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid)
{
struct smb2_close_req *req;
struct smb2_close_rsp *rsp;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
int resp_buftype;
int rc = 0;
cifs_dbg(FYI, "Close\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req);
if (rc)
return rc;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
rsp = (struct smb2_close_rsp *)iov[0].iov_base;
if (rc != 0) {
if (tcon)
cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE);
goto close_exit;
}
/* BB FIXME - decode close response, update inode for caching */
close_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
static int
validate_buf(unsigned int offset, unsigned int buffer_length,
struct smb2_hdr *hdr, unsigned int min_buf_size)
{
unsigned int smb_len = be32_to_cpu(hdr->smb2_buf_length);
char *end_of_smb = smb_len + 4 /* RFC1001 length field */ + (char *)hdr;
char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
char *end_of_buf = begin_of_buf + buffer_length;
if (buffer_length < min_buf_size) {
cifs_dbg(VFS, "buffer length %d smaller than minimum size %d\n",
buffer_length, min_buf_size);
return -EINVAL;
}
/* check if beyond RFC1001 maximum length */
if ((smb_len > 0x7FFFFF) || (buffer_length > 0x7FFFFF)) {
cifs_dbg(VFS, "buffer length %d or smb length %d too large\n",
buffer_length, smb_len);
return -EINVAL;
}
if ((begin_of_buf > end_of_smb) || (end_of_buf > end_of_smb)) {
cifs_dbg(VFS, "illegal server response, bad offset to data\n");
return -EINVAL;
}
return 0;
}
/*
* If SMB buffer fields are valid, copy into temporary buffer to hold result.
* Caller must free buffer.
*/
static int
validate_and_copy_buf(unsigned int offset, unsigned int buffer_length,
struct smb2_hdr *hdr, unsigned int minbufsize,
char *data)
{
char *begin_of_buf = 4 /* RFC1001 len field */ + offset + (char *)hdr;
int rc;
if (!data)
return -EINVAL;
rc = validate_buf(offset, buffer_length, hdr, minbufsize);
if (rc)
return rc;
memcpy(data, begin_of_buf, buffer_length);
return 0;
}
static int
query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u8 info_class,
size_t output_len, size_t min_len, void *data)
{
struct smb2_query_info_req *req;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int resp_buftype;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
cifs_dbg(FYI, "Query Info\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
if (rc)
return rc;
req->InfoType = SMB2_O_INFO_FILE;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
/* 4 for rfc1002 length field and 1 for Buffer */
req->InputBufferOffset =
cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
req->OutputBufferLength = cpu_to_le32(output_len);
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
rsp = (struct smb2_query_info_rsp *)iov[0].iov_base;
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qinf_exit;
}
rc = validate_and_copy_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength),
&rsp->hdr, min_len, data);
qinf_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
int
SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid,
struct smb2_file_all_info *data)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_ALL_INFORMATION,
sizeof(struct smb2_file_all_info) + MAX_NAME * 2,
sizeof(struct smb2_file_all_info), data);
}
int
SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le64 *uniqueid)
{
return query_info(xid, tcon, persistent_fid, volatile_fid,
FILE_INTERNAL_INFORMATION,
sizeof(struct smb2_file_internal_info),
sizeof(struct smb2_file_internal_info), uniqueid);
}
/*
* This is a no-op for now. We're not really interested in the reply, but
* rather in the fact that the server sent one and that server->lstrp
* gets updated.
*
* FIXME: maybe we should consider checking that the reply matches request?
*/
static void
smb2_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
struct smb2_echo_rsp *smb2 = (struct smb2_echo_rsp *)mid->resp_buf;
unsigned int credits_received = 1;
if (mid->mid_state == MID_RESPONSE_RECEIVED)
credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
DeleteMidQEntry(mid);
add_credits(server, credits_received, CIFS_ECHO_OP);
}
int
SMB2_echo(struct TCP_Server_Info *server)
{
struct smb2_echo_req *req;
int rc = 0;
struct kvec iov;
struct smb_rqst rqst = { .rq_iov = &iov,
.rq_nvec = 1 };
cifs_dbg(FYI, "In echo request\n");
rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
if (rc)
return rc;
req->hdr.CreditRequest = cpu_to_le16(1);
iov.iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov.iov_len = get_rfc1002_length(req) + 4;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, server,
CIFS_ECHO_OP);
if (rc)
cifs_dbg(FYI, "Echo request failed: %d\n", rc);
cifs_small_buf_release(req);
return rc;
}
int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid)
{
struct smb2_flush_req *req;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
int resp_buftype;
int rc = 0;
cifs_dbg(FYI, "Flush\n");
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req);
if (rc)
return rc;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
rc = SendReceive2(xid, ses, iov, 1, &resp_buftype, 0);
if ((rc != 0) && tcon)
cifs_stats_fail_inc(tcon, SMB2_FLUSH_HE);
free_rsp_buf(resp_buftype, iov[0].iov_base);
return rc;
}
/*
* To form a chain of read requests, any read requests after the first should
* have the end_of_chain boolean set to true.
*/
static int
smb2_new_read_req(struct kvec *iov, struct cifs_io_parms *io_parms,
unsigned int remaining_bytes, int request_type)
{
int rc = -EACCES;
struct smb2_read_req *req = NULL;
rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req);
if (rc)
return rc;
if (io_parms->tcon->ses->server == NULL)
return -ECONNABORTED;
req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
req->ReadChannelInfoOffset = 0; /* reserved */
req->ReadChannelInfoLength = 0; /* reserved */
req->Channel = 0; /* reserved */
req->MinimumCount = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
if (request_type & CHAINED_REQUEST) {
if (!(request_type & END_OF_CHAIN)) {
/* 4 for rfc1002 length field */
req->hdr.NextCommand =
cpu_to_le32(get_rfc1002_length(req) + 4);
} else /* END_OF_CHAIN */
req->hdr.NextCommand = 0;
if (request_type & RELATED_REQUEST) {
req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS;
/*
* Related requests use info from previous read request
* in chain.
*/
req->hdr.SessionId = 0xFFFFFFFF;
req->hdr.TreeId = 0xFFFFFFFF;
req->PersistentFileId = 0xFFFFFFFF;
req->VolatileFileId = 0xFFFFFFFF;
}
}
if (remaining_bytes > io_parms->length)
req->RemainingBytes = cpu_to_le32(remaining_bytes);
else
req->RemainingBytes = 0;
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov[0].iov_len = get_rfc1002_length(req) + 4;
return rc;
}
static void
smb2_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_hdr *buf = (struct smb2_hdr *)rdata->iov.iov_base;
unsigned int credits_received = 1;
struct smb_rqst rqst = { .rq_iov = &rdata->iov,
.rq_nvec = 1,
.rq_pages = rdata->pages,
.rq_npages = rdata->nr_pages,
.rq_pagesz = rdata->pagesz,
.rq_tailsz = rdata->tailsz };
cifs_dbg(FYI, "%s: mid=%llu state=%d result=%d bytes=%u\n",
__func__, mid->mid, mid->mid_state, rdata->result,
rdata->bytes);
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits_received = le16_to_cpu(buf->CreditRequest);
/* result already set, check signature */
if (server->sign) {
int rc;
rc = smb2_verify_signature(&rqst, server);
if (rc)
cifs_dbg(VFS, "SMB signature verification returned error = %d\n",
rc);
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
/* reset byte count since we cannot check the signature */
rdata->got_bytes = 0;
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
default:
if (rdata->result != -ENODATA)
rdata->result = -EIO;
}
if (rdata->result)
cifs_stats_fail_inc(tcon, SMB2_READ_HE);
queue_work(cifsiod_wq, &rdata->work);
DeleteMidQEntry(mid);
add_credits(server, credits_received, 0);
}
/* smb2_async_readv - send an async read, and set up mid to handle result */
int
smb2_async_readv(struct cifs_readdata *rdata)
{
int rc, flags = 0;
struct smb2_hdr *buf;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = &rdata->iov,
.rq_nvec = 1 };
struct TCP_Server_Info *server;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
io_parms.tcon = tlink_tcon(rdata->cfile->tlink);
io_parms.offset = rdata->offset;
io_parms.length = rdata->bytes;
io_parms.persistent_fid = rdata->cfile->fid.persistent_fid;
io_parms.volatile_fid = rdata->cfile->fid.volatile_fid;
io_parms.pid = rdata->pid;
server = io_parms.tcon->ses->server;
rc = smb2_new_read_req(&rdata->iov, &io_parms, 0, 0);
if (rc) {
if (rc == -EAGAIN && rdata->credits) {
/* credits was reset by reconnect */
rdata->credits = 0;
/* reduce in_flight value since we won't send the req */
spin_lock(&server->req_lock);
server->in_flight--;
spin_unlock(&server->req_lock);
}
return rc;
}
buf = (struct smb2_hdr *)rdata->iov.iov_base;
/* 4 for rfc1002 length field */
rdata->iov.iov_len = get_rfc1002_length(rdata->iov.iov_base) + 4;
if (rdata->credits) {
buf->CreditCharge = cpu_to_le16(DIV_ROUND_UP(rdata->bytes,
SMB2_MAX_BUFFER_SIZE));
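/*
 * Worked example (informational): assuming SMB2_MAX_BUFFER_SIZE is
 * 64KiB, a 128KiB read is charged DIV_ROUND_UP(131072, 65536) = 2
 * credits.
 */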
spin_lock(&server->req_lock);
server->credits += rdata->credits -
le16_to_cpu(buf->CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
flags = CIFS_HAS_CREDITS;
}
kref_get(&rdata->refcount);
rc = cifs_call_async(io_parms.tcon->ses->server, &rqst,
cifs_readv_receive, smb2_readv_callback,
rdata, flags);
if (rc) {
kref_put(&rdata->refcount, cifs_readdata_release);
cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE);
}
cifs_small_buf_release(buf);
return rc;
}
int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type)
{
int resp_buftype, rc = -EACCES;
struct smb2_read_rsp *rsp = NULL;
struct kvec iov[1];
*nbytes = 0;
rc = smb2_new_read_req(iov, io_parms, 0, 0);
if (rc)
return rc;
rc = SendReceive2(xid, io_parms->tcon->ses, iov, 1,
&resp_buftype, CIFS_LOG_ERROR);
rsp = (struct smb2_read_rsp *)iov[0].iov_base;
if (rsp->hdr.Status == STATUS_END_OF_FILE) {
free_rsp_buf(resp_buftype, iov[0].iov_base);
return 0;
}
if (rc) {
cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE);
cifs_dbg(VFS, "Send error in read = %d\n", rc);
} else {
*nbytes = le32_to_cpu(rsp->DataLength);
if ((*nbytes > CIFS_MAX_MSGSIZE) ||
(*nbytes > io_parms->length)) {
cifs_dbg(FYI, "bad length %d for count %d\n",
*nbytes, io_parms->length);
rc = -EIO;
*nbytes = 0;
}
}
if (*buf) {
memcpy(*buf, (char *)rsp->hdr.ProtocolId + rsp->DataOffset,
*nbytes);
free_rsp_buf(resp_buftype, iov[0].iov_base);
} else if (resp_buftype != CIFS_NO_BUFFER) {
*buf = iov[0].iov_base;
if (resp_buftype == CIFS_SMALL_BUFFER)
*buf_type = CIFS_SMALL_BUFFER;
else if (resp_buftype == CIFS_LARGE_BUFFER)
*buf_type = CIFS_LARGE_BUFFER;
}
return rc;
}
/*
* Check the mid_state and signature on received buffer (if any), and queue the
* workqueue completion task.
*/
static void
smb2_writev_callback(struct mid_q_entry *mid)
{
struct cifs_writedata *wdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
unsigned int credits_received = 1;
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
credits_received = le16_to_cpu(rsp->hdr.CreditRequest);
wdata->result = smb2_check_receive(mid, tcon->ses->server, 0);
if (wdata->result != 0)
break;
written = le32_to_cpu(rsp->DataLength);
/*
* Mask off high 16 bits when bytes written as returned
* by the server is greater than bytes requested by the
* client. OS/2 servers are known to set incorrect
* CountHigh values.
*/
if (written > wdata->bytes)
written &= 0xFFFF;
if (written < wdata->bytes)
wdata->result = -ENOSPC;
else
wdata->bytes = written;
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
wdata->result = -EAGAIN;
break;
default:
wdata->result = -EIO;
break;
}
if (wdata->result)
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
queue_work(cifsiod_wq, &wdata->work);
DeleteMidQEntry(mid);
add_credits(tcon->ses->server, credits_received, 0);
}
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
smb2_async_writev(struct cifs_writedata *wdata,
void (*release)(struct kref *kref))
{
int rc = -EACCES, flags = 0;
struct smb2_write_req *req = NULL;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
struct kvec iov;
struct smb_rqst rqst;
rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req);
if (rc) {
if (rc == -EAGAIN && wdata->credits) {
/* credits was reset by reconnect */
wdata->credits = 0;
/* reduce in_flight value since we won't send the req */
spin_lock(&server->req_lock);
server->in_flight--;
spin_unlock(&server->req_lock);
}
goto async_writev_out;
}
req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid);
req->PersistentFileId = wdata->cfile->fid.persistent_fid;
req->VolatileFileId = wdata->cfile->fid.volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Offset = cpu_to_le64(wdata->offset);
/* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer) - 4);
req->RemainingBytes = 0;
/* 4 for rfc1002 length field and 1 for Buffer */
iov.iov_len = get_rfc1002_length(req) + 4 - 1;
iov.iov_base = req;
rqst.rq_iov = &iov;
rqst.rq_nvec = 1;
rqst.rq_pages = wdata->pages;
rqst.rq_npages = wdata->nr_pages;
rqst.rq_pagesz = wdata->pagesz;
rqst.rq_tailsz = wdata->tailsz;
cifs_dbg(FYI, "async write at %llu %u bytes\n",
wdata->offset, wdata->bytes);
req->Length = cpu_to_le32(wdata->bytes);
inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */);
if (wdata->credits) {
req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes,
SMB2_MAX_BUFFER_SIZE));
spin_lock(&server->req_lock);
server->credits += wdata->credits -
le16_to_cpu(req->hdr.CreditCharge);
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
flags = CIFS_HAS_CREDITS;
}
kref_get(&wdata->refcount);
rc = cifs_call_async(server, &rqst, NULL, smb2_writev_callback, wdata,
flags);
if (rc) {
kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
}
async_writev_out:
cifs_small_buf_release(req);
return rc;
}
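/*
 * Illustrative sketch (added; not in the original source) of the credit
 * charge arithmetic in smb2_async_writev() above: assuming an
 * SMB2_MAX_BUFFER_SIZE of 64KiB (65536), a 1MiB write is charged
 * DIV_ROUND_UP(1048576, 65536) = 16 credits, and any remainder rounds up,
 * so a 65537-byte write still costs 2 credits. The helper name below is
 * hypothetical.
 */
#if 0
static unsigned int example_credit_charge(unsigned int bytes,
					  unsigned int max_buf)
{
	/* same rounding as DIV_ROUND_UP(wdata->bytes, SMB2_MAX_BUFFER_SIZE) */
	return (bytes + max_buf - 1) / max_buf;
}
#endif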
/*
 * SMB2_write() is given an iov pointer to a kvec array of length n_vec.
 * n_vec must be at least 1; the elements that carry data to write begin at
 * position 1 in the iov array. The total amount of data to write is
 * specified by io_parms->length.
*/
int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
int rc = 0;
struct smb2_write_req *req = NULL;
struct smb2_write_rsp *rsp = NULL;
int resp_buftype;
*nbytes = 0;
if (n_vec < 1)
return rc;
	/* check the connection before allocating req so it cannot be leaked */
	if (io_parms->tcon->ses->server == NULL)
		return -ECONNABORTED;
	rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req);
	if (rc)
		return rc;
req->hdr.ProcessId = cpu_to_le32(io_parms->pid);
req->PersistentFileId = io_parms->persistent_fid;
req->VolatileFileId = io_parms->volatile_fid;
req->WriteChannelInfoOffset = 0;
req->WriteChannelInfoLength = 0;
req->Channel = 0;
req->Length = cpu_to_le32(io_parms->length);
req->Offset = cpu_to_le64(io_parms->offset);
/* 4 for rfc1002 length field */
req->DataOffset = cpu_to_le16(
offsetof(struct smb2_write_req, Buffer) - 4);
req->RemainingBytes = 0;
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
/* length of entire message including data to be written */
inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */);
rc = SendReceive2(xid, io_parms->tcon->ses, iov, n_vec + 1,
&resp_buftype, 0);
rsp = (struct smb2_write_rsp *)iov[0].iov_base;
if (rc) {
cifs_stats_fail_inc(io_parms->tcon, SMB2_WRITE_HE);
cifs_dbg(VFS, "Send error in write = %d\n", rc);
} else
*nbytes = le32_to_cpu(rsp->DataLength);
free_rsp_buf(resp_buftype, rsp);
return rc;
}
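/*
 * Illustrative sketch (added; not in the original source): how a caller
 * might lay out the kvec array for SMB2_write() per the comment above.
 * iov[0] is filled in by SMB2_write() itself with the request header;
 * elements 1..n_vec carry the payload. All identifiers prefixed with
 * "example_" are hypothetical.
 */
#if 0
static int example_write_one_buffer(const unsigned int xid,
				    struct cifs_io_parms *io_parms,
				    char *payload, unsigned int len)
{
	struct kvec iov[2];
	unsigned int nbytes;
	iov[1].iov_base = payload;	/* data begins at position 1 */
	iov[1].iov_len = len;
	io_parms->length = len;
	return SMB2_write(xid, io_parms, &nbytes, iov, 1);
}
#endif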
static unsigned int
num_entries(char *bufstart, char *end_of_buf, char **lastentry, size_t size)
{
int len;
unsigned int entrycount = 0;
unsigned int next_offset = 0;
FILE_DIRECTORY_INFO *entryptr;
if (bufstart == NULL)
return 0;
entryptr = (FILE_DIRECTORY_INFO *)bufstart;
while (1) {
entryptr = (FILE_DIRECTORY_INFO *)
((char *)entryptr + next_offset);
if ((char *)entryptr + size > end_of_buf) {
cifs_dbg(VFS, "malformed search entry would overflow\n");
break;
}
len = le32_to_cpu(entryptr->FileNameLength);
if ((char *)entryptr + len + size > end_of_buf) {
cifs_dbg(VFS, "directory entry name would overflow frame end of buf %p\n",
end_of_buf);
break;
}
*lastentry = (char *)entryptr;
entrycount++;
next_offset = le32_to_cpu(entryptr->NextEntryOffset);
if (!next_offset)
break;
}
return entrycount;
}
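/*
 * Illustrative sketch (added; not in the original source): the
 * NextEntryOffset walk that num_entries() performs, shown as a minimal
 * consumer. Entries are variable-length FILE_DIRECTORY_INFO records
 * chained by NextEntryOffset; an offset of zero terminates the chain.
 * The function name is hypothetical.
 */
#if 0
static void example_walk_entries(char *start, char *end, size_t fixed_size)
{
	FILE_DIRECTORY_INFO *e = (FILE_DIRECTORY_INFO *)start;
	while ((char *)e + fixed_size <= end) {
		/* ... consume one entry here ... */
		if (!le32_to_cpu(e->NextEntryOffset))
			break;
		e = (FILE_DIRECTORY_INFO *)
			((char *)e + le32_to_cpu(e->NextEntryOffset));
	}
}
#endif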
/*
* Readdir/FindFirst
*/
int
SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
int rc = 0;
int len;
int resp_buftype;
unsigned char *bufptr;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
	__le16 asterisk = cpu_to_le16('*');
char *end_of_smb;
unsigned int output_size = CIFSMaxBufSize;
size_t info_buf_size;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req);
if (rc)
return rc;
switch (srch_inf->info_level) {
case SMB_FIND_FILE_DIRECTORY_INFO:
req->FileInformationClass = FILE_DIRECTORY_INFORMATION;
info_buf_size = sizeof(FILE_DIRECTORY_INFO) - 1;
break;
case SMB_FIND_FILE_ID_FULL_DIR_INFO:
req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION;
info_buf_size = sizeof(SEARCH_ID_FULL_DIR_INFO) - 1;
break;
default:
cifs_dbg(VFS, "info level %u isn't supported\n",
srch_inf->info_level);
rc = -EINVAL;
goto qdir_exit;
}
req->FileIndex = cpu_to_le32(index);
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
len = 0x2;
bufptr = req->Buffer;
	memcpy(bufptr, &asterisk, len);
req->FileNameOffset =
cpu_to_le16(sizeof(struct smb2_query_directory_req) - 1 - 4);
req->FileNameLength = cpu_to_le16(len);
/*
* BB could be 30 bytes or so longer if we used SMB2 specific
* buffer lengths, but this is safe and close enough.
*/
output_size = min_t(unsigned int, output_size, server->maxBuf);
output_size = min_t(unsigned int, output_size, 2 << 15);
req->OutputBufferLength = cpu_to_le32(output_size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length and 1 for Buffer */
iov[0].iov_len = get_rfc1002_length(req) + 4 - 1;
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
inc_rfc1001_len(req, len - 1 /* Buffer */);
rc = SendReceive2(xid, ses, iov, 2, &resp_buftype, 0);
rsp = (struct smb2_query_directory_rsp *)iov[0].iov_base;
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE);
goto qdir_exit;
}
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
info_buf_size);
if (rc)
goto qdir_exit;
srch_inf->unicode = true;
if (srch_inf->ntwrk_buf_start) {
if (srch_inf->smallBuf)
cifs_small_buf_release(srch_inf->ntwrk_buf_start);
else
cifs_buf_release(srch_inf->ntwrk_buf_start);
}
srch_inf->ntwrk_buf_start = (char *)rsp;
srch_inf->srch_entries_start = srch_inf->last_entry = 4 /* rfclen */ +
(char *)&rsp->hdr + le16_to_cpu(rsp->OutputBufferOffset);
/* 4 for rfc1002 length field */
end_of_smb = get_rfc1002_length(rsp) + 4 + (char *)&rsp->hdr;
srch_inf->entries_in_buffer =
num_entries(srch_inf->srch_entries_start, end_of_smb,
&srch_inf->last_entry, info_buf_size);
srch_inf->index_of_last_entry += srch_inf->entries_in_buffer;
cifs_dbg(FYI, "num entries %d last_index %lld srch start %p srch end %p\n",
srch_inf->entries_in_buffer, srch_inf->index_of_last_entry,
srch_inf->srch_entries_start, srch_inf->last_entry);
if (resp_buftype == CIFS_LARGE_BUFFER)
srch_inf->smallBuf = false;
else if (resp_buftype == CIFS_SMALL_BUFFER)
srch_inf->smallBuf = true;
else
cifs_dbg(VFS, "illegal search buffer type\n");
if (rsp->hdr.Status == STATUS_NO_MORE_FILES)
srch_inf->endOfSearch = 1;
else
srch_inf->endOfSearch = 0;
return rc;
qdir_exit:
free_rsp_buf(resp_buftype, rsp);
return rc;
}
static int
send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, u32 pid, int info_class,
unsigned int num, void **data, unsigned int *size)
{
struct smb2_set_info_req *req;
struct smb2_set_info_rsp *rsp = NULL;
struct kvec *iov;
int rc = 0;
int resp_buftype;
unsigned int i;
struct TCP_Server_Info *server;
struct cifs_ses *ses = tcon->ses;
if (ses && (ses->server))
server = ses->server;
else
return -EIO;
if (!num)
return -EINVAL;
iov = kmalloc(sizeof(struct kvec) * num, GFP_KERNEL);
if (!iov)
return -ENOMEM;
rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req);
if (rc) {
kfree(iov);
return rc;
}
req->hdr.ProcessId = cpu_to_le32(pid);
req->InfoType = SMB2_O_INFO_FILE;
req->FileInfoClass = info_class;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
/* 4 for RFC1001 length and 1 for Buffer */
req->BufferOffset =
cpu_to_le16(sizeof(struct smb2_set_info_req) - 1 - 4);
req->BufferLength = cpu_to_le32(*size);
inc_rfc1001_len(req, *size - 1 /* Buffer */);
memcpy(req->Buffer, *data, *size);
iov[0].iov_base = (char *)req;
/* 4 for RFC1001 length */
iov[0].iov_len = get_rfc1002_length(req) + 4;
for (i = 1; i < num; i++) {
inc_rfc1001_len(req, size[i]);
le32_add_cpu(&req->BufferLength, size[i]);
iov[i].iov_base = (char *)data[i];
iov[i].iov_len = size[i];
}
rc = SendReceive2(xid, ses, iov, num, &resp_buftype, 0);
rsp = (struct smb2_set_info_rsp *)iov[0].iov_base;
if (rc != 0)
cifs_stats_fail_inc(tcon, SMB2_SET_INFO_HE);
free_rsp_buf(resp_buftype, rsp);
kfree(iov);
return rc;
}
int
SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
struct smb2_file_rename_info info;
void **data;
unsigned int size[2];
int rc;
int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
if (!data)
return -ENOMEM;
info.ReplaceIfExists = 1; /* 1 = replace existing target with new */
/* 0 = fail if target already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does the spec say this?) */
info.FileNameLength = cpu_to_le32(len);
data[0] = &info;
size[0] = sizeof(struct smb2_file_rename_info);
data[1] = target_file;
size[1] = len + 2 /* null */;
rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, FILE_RENAME_INFORMATION, 2, data,
size);
kfree(data);
return rc;
}
int
SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, __le16 *target_file)
{
struct smb2_file_link_info info;
void **data;
unsigned int size[2];
int rc;
int len = (2 * UniStrnlen((wchar_t *)target_file, PATH_MAX));
data = kmalloc(sizeof(void *) * 2, GFP_KERNEL);
if (!data)
return -ENOMEM;
info.ReplaceIfExists = 0; /* 1 = replace existing link with new */
/* 0 = fail if link already exists */
	info.RootDirectory = 0;  /* MBZ for network ops (why does the spec say this?) */
info.FileNameLength = cpu_to_le32(len);
data[0] = &info;
size[0] = sizeof(struct smb2_file_link_info);
data[1] = target_file;
size[1] = len + 2 /* null */;
rc = send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, FILE_LINK_INFORMATION, 2, data, size);
kfree(data);
return rc;
}
int
SMB2_set_eof(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid, u32 pid, __le64 *eof, bool is_falloc)
{
struct smb2_file_eof_info info;
void *data;
unsigned int size;
info.EndOfFile = *eof;
data = &info;
size = sizeof(struct smb2_file_eof_info);
if (is_falloc)
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
pid, FILE_ALLOCATION_INFORMATION, 1, &data, &size);
else
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
pid, FILE_END_OF_FILE_INFORMATION, 1, &data, &size);
}
int
SMB2_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, FILE_BASIC_INFO *buf)
{
unsigned int size;
size = sizeof(FILE_BASIC_INFO);
return send_set_info(xid, tcon, persistent_fid, volatile_fid,
current->tgid, FILE_BASIC_INFORMATION, 1,
(void **)&buf, &size);
}
int
SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
__u8 oplock_level)
{
int rc;
struct smb2_oplock_break *req = NULL;
cifs_dbg(FYI, "SMB2_oplock_break\n");
rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
if (rc)
return rc;
req->VolatileFid = volatile_fid;
req->PersistentFid = persistent_fid;
req->OplockLevel = oplock_level;
req->hdr.CreditRequest = cpu_to_le16(1);
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
/* SMB2 buffer freed by function above */
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
}
return rc;
}
static void
copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
struct kstatfs *kst)
{
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return;
}
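/*
 * Worked example (added; not in the original source) of the conversion in
 * copy_fs_info_to_kstatfs() above: for a share reporting BytesPerSector of
 * 512 and SectorsPerAllocationUnit of 8, f_bsize becomes 512 * 8 = 4096,
 * and the allocation-unit counts map directly onto f_blocks, f_bfree and
 * f_bavail in those 4096-byte units.
 */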
static int
build_qfs_info_req(struct kvec *iov, struct cifs_tcon *tcon, int level,
int outbuf_len, u64 persistent_fid, u64 volatile_fid)
{
int rc;
struct smb2_query_info_req *req;
cifs_dbg(FYI, "Query FSInfo level %d\n", level);
if ((tcon->ses == NULL) || (tcon->ses->server == NULL))
return -EIO;
rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req);
if (rc)
return rc;
req->InfoType = SMB2_O_INFO_FILESYSTEM;
req->FileInfoClass = level;
req->PersistentFileId = persistent_fid;
req->VolatileFileId = volatile_fid;
/* 4 for rfc1002 length field and 1 for pad */
req->InputBufferOffset =
cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4);
req->OutputBufferLength = cpu_to_le32(
outbuf_len + sizeof(struct smb2_query_info_rsp) - 1 - 4);
iov->iov_base = (char *)req;
/* 4 for rfc1002 length field */
iov->iov_len = get_rfc1002_length(req) + 4;
return 0;
}
int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
int rc = 0;
int resp_buftype;
struct cifs_ses *ses = tcon->ses;
struct smb2_fs_full_size_info *info = NULL;
rc = build_qfs_info_req(&iov, tcon, FS_FULL_SIZE_INFORMATION,
sizeof(struct smb2_fs_full_size_info),
persistent_fid, volatile_fid);
if (rc)
return rc;
rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsinf_exit;
}
rsp = (struct smb2_query_info_rsp *)iov.iov_base;
info = (struct smb2_fs_full_size_info *)(4 /* RFC1001 len */ +
le16_to_cpu(rsp->OutputBufferOffset) + (char *)&rsp->hdr);
rc = validate_buf(le16_to_cpu(rsp->OutputBufferOffset),
le32_to_cpu(rsp->OutputBufferLength), &rsp->hdr,
sizeof(struct smb2_fs_full_size_info));
if (!rc)
copy_fs_info_to_kstatfs(info, fsdata);
qfsinf_exit:
free_rsp_buf(resp_buftype, iov.iov_base);
return rc;
}
int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int level)
{
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
int rc = 0;
int resp_buftype, max_len, min_len;
struct cifs_ses *ses = tcon->ses;
unsigned int rsp_len, offset;
if (level == FS_DEVICE_INFORMATION) {
max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
min_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
} else if (level == FS_ATTRIBUTE_INFORMATION) {
max_len = sizeof(FILE_SYSTEM_ATTRIBUTE_INFO);
min_len = MIN_FS_ATTR_INFO_SIZE;
} else if (level == FS_SECTOR_SIZE_INFORMATION) {
max_len = sizeof(struct smb3_fs_ss_info);
min_len = sizeof(struct smb3_fs_ss_info);
} else {
cifs_dbg(FYI, "Invalid qfsinfo level %d\n", level);
return -EINVAL;
}
rc = build_qfs_info_req(&iov, tcon, level, max_len,
persistent_fid, volatile_fid);
if (rc)
return rc;
rc = SendReceive2(xid, ses, &iov, 1, &resp_buftype, 0);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
goto qfsattr_exit;
}
rsp = (struct smb2_query_info_rsp *)iov.iov_base;
rsp_len = le32_to_cpu(rsp->OutputBufferLength);
offset = le16_to_cpu(rsp->OutputBufferOffset);
rc = validate_buf(offset, rsp_len, &rsp->hdr, min_len);
if (rc)
goto qfsattr_exit;
if (level == FS_ATTRIBUTE_INFORMATION)
memcpy(&tcon->fsAttrInfo, 4 /* RFC1001 len */ + offset
+ (char *)&rsp->hdr, min_t(unsigned int,
rsp_len, max_len));
else if (level == FS_DEVICE_INFORMATION)
memcpy(&tcon->fsDevInfo, 4 /* RFC1001 len */ + offset
+ (char *)&rsp->hdr, sizeof(FILE_SYSTEM_DEVICE_INFO));
else if (level == FS_SECTOR_SIZE_INFORMATION) {
struct smb3_fs_ss_info *ss_info = (struct smb3_fs_ss_info *)
(4 /* RFC1001 len */ + offset + (char *)&rsp->hdr);
tcon->ss_flags = le32_to_cpu(ss_info->Flags);
tcon->perf_sector_size =
le32_to_cpu(ss_info->PhysicalBytesPerSectorForPerf);
}
qfsattr_exit:
free_rsp_buf(resp_buftype, iov.iov_base);
return rc;
}
int
smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u32 num_lock, struct smb2_lock_element *buf)
{
int rc = 0;
struct smb2_lock_req *req = NULL;
struct kvec iov[2];
int resp_buf_type;
unsigned int count;
cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req);
if (rc)
return rc;
req->hdr.ProcessId = cpu_to_le32(pid);
req->LockCount = cpu_to_le16(num_lock);
req->PersistentFileId = persist_fid;
req->VolatileFileId = volatile_fid;
count = num_lock * sizeof(struct smb2_lock_element);
inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element));
iov[0].iov_base = (char *)req;
/* 4 for rfc1002 length field and count for all locks */
iov[0].iov_len = get_rfc1002_length(req) + 4 - count;
iov[1].iov_base = (char *)buf;
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
if (rc) {
cifs_dbg(FYI, "Send error in smb2_lockv = %d\n", rc);
cifs_stats_fail_inc(tcon, SMB2_LOCK_HE);
}
return rc;
}
int
SMB2_lock(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u64 length, const __u64 offset, const __u32 lock_flags,
const bool wait)
{
struct smb2_lock_element lock;
lock.Offset = cpu_to_le64(offset);
lock.Length = cpu_to_le64(length);
lock.Flags = cpu_to_le32(lock_flags);
if (!wait && lock_flags != SMB2_LOCKFLAG_UNLOCK)
lock.Flags |= cpu_to_le32(SMB2_LOCKFLAG_FAIL_IMMEDIATELY);
return smb2_lockv(xid, tcon, persist_fid, volatile_fid, pid, 1, &lock);
}
int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 *lease_key, const __le32 lease_state)
{
int rc;
struct smb2_lease_ack *req = NULL;
cifs_dbg(FYI, "SMB2_lease_break\n");
rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req);
if (rc)
return rc;
req->hdr.CreditRequest = cpu_to_le16(1);
req->StructureSize = cpu_to_le16(36);
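	/*
	 * Clarifying note (added; assumption based on MS-SMB2): a lease break
	 * ack (StructureSize 36) is 12 bytes larger than the oplock break
	 * frame (StructureSize 24) allocated by small_smb2_init(), hence the
	 * 12-byte length fixup below.
	 */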
inc_rfc1001_len(req, 12);
memcpy(req->LeaseKey, lease_key, 16);
req->LeaseState = lease_state;
rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP);
/* SMB2 buffer freed by function above */
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
}
return rc;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2275_0
crossvul-cpp_data_bad_4815_1 |
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA PPPP TTTTT IIIII OOO N N %
% C A A P P T I O O NN N %
% C AAAAA PPPP T I O O N N N %
% C A A P T I O O N NN %
% CCCC A A P T IIIII OOO N N %
% %
% %
% Read Text Caption. %
% %
% Software Design %
% Cristy %
% February 2002 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/annotate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/composite-private.h"
#include "magick/draw.h"
#include "magick/draw-private.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/option.h"
#include "magick/pixel-accessor.h"
#include "magick/property.h"
#include "magick/quantum-private.h"
#include "magick/static.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/utility.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e a d C A P T I O N I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadCAPTIONImage() reads a CAPTION image file and returns it. It
% allocates the memory necessary for the new Image structure and returns a
% pointer to the new image.
%
% The format of the ReadCAPTIONImage method is:
%
% Image *ReadCAPTIONImage(const ImageInfo *image_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Image *ReadCAPTIONImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
char
*caption,
geometry[MaxTextExtent],
*property,
*text;
const char
*gravity,
*option;
DrawInfo
*draw_info;
Image
*image;
MagickBooleanType
split,
status;
register ssize_t
i;
size_t
height,
width;
TypeMetric
metrics;
/*
Initialize Image structure.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=AcquireImage(image_info);
(void) ResetImagePage(image,"0x0+0+0");
/*
Format caption.
*/
option=GetImageOption(image_info,"filename");
if (option == (const char *) NULL)
property=InterpretImageProperties(image_info,image,image_info->filename);
else
if (LocaleNCompare(option,"caption:",8) == 0)
property=InterpretImageProperties(image_info,image,option+8);
else
property=InterpretImageProperties(image_info,image,option);
(void) SetImageProperty(image,"caption",property);
property=DestroyString(property);
caption=ConstantString(GetImageProperty(image,"caption"));
draw_info=CloneDrawInfo(image_info,(DrawInfo *) NULL);
(void) CloneString(&draw_info->text,caption);
gravity=GetImageOption(image_info,"gravity");
if (gravity != (char *) NULL)
draw_info->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,gravity);
split=MagickFalse;
status=MagickTrue;
if (image->columns == 0)
{
text=AcquireString(caption);
i=FormatMagickCaption(image,draw_info,split,&metrics,&text);
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g",
-metrics.bounds.x1,metrics.ascent);
if (draw_info->gravity == UndefinedGravity)
(void) CloneString(&draw_info->geometry,geometry);
status=GetMultilineTypeMetrics(image,draw_info,&metrics);
width=(size_t) floor(metrics.width+draw_info->stroke_width+0.5);
image->columns=width;
}
if (image->rows == 0)
{
split=MagickTrue;
text=AcquireString(caption);
i=FormatMagickCaption(image,draw_info,split,&metrics,&text);
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g",
-metrics.bounds.x1,metrics.ascent);
if (draw_info->gravity == UndefinedGravity)
(void) CloneString(&draw_info->geometry,geometry);
status=GetMultilineTypeMetrics(image,draw_info,&metrics);
image->rows=(size_t) ((i+1)*(metrics.ascent-metrics.descent+
draw_info->interline_spacing+draw_info->stroke_width)+0.5);
}
if (status != MagickFalse)
status=SetImageExtent(image,image->columns,image->rows);
  if (status == MagickFalse)
    {
      caption=DestroyString(caption);
      draw_info=DestroyDrawInfo(draw_info);
      InheritException(exception,&image->exception);
      return(DestroyImageList(image));
    }
  if (SetImageBackgroundColor(image) == MagickFalse)
    {
      caption=DestroyString(caption);
      draw_info=DestroyDrawInfo(draw_info);
      InheritException(exception,&image->exception);
      image=DestroyImageList(image);
      return((Image *) NULL);
    }
if ((fabs(image_info->pointsize) < MagickEpsilon) && (strlen(caption) > 0))
{
double
high,
low;
/*
Auto fit text into bounding box.
*/
for ( ; ; draw_info->pointsize*=2.0)
{
text=AcquireString(caption);
i=FormatMagickCaption(image,draw_info,split,&metrics,&text);
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g",
-metrics.bounds.x1,metrics.ascent);
if (draw_info->gravity == UndefinedGravity)
(void) CloneString(&draw_info->geometry,geometry);
        status=GetMultilineTypeMetrics(image,draw_info,&metrics);
        if (status == MagickFalse)
          break;
width=(size_t) floor(metrics.width+draw_info->stroke_width+0.5);
height=(size_t) floor(metrics.height+draw_info->stroke_width+0.5);
if ((image->columns != 0) && (image->rows != 0))
{
if ((width >= image->columns) && (height >= image->rows))
break;
}
else
if (((image->columns != 0) && (width >= image->columns)) ||
((image->rows != 0) && (height >= image->rows)))
break;
}
high=draw_info->pointsize;
for (low=1.0; (high-low) > 0.5; )
{
draw_info->pointsize=(low+high)/2.0;
text=AcquireString(caption);
i=FormatMagickCaption(image,draw_info,split,&metrics,&text);
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g",
-metrics.bounds.x1,metrics.ascent);
if (draw_info->gravity == UndefinedGravity)
(void) CloneString(&draw_info->geometry,geometry);
(void) GetMultilineTypeMetrics(image,draw_info,&metrics);
width=(size_t) floor(metrics.width+draw_info->stroke_width+0.5);
height=(size_t) floor(metrics.height+draw_info->stroke_width+0.5);
if ((image->columns != 0) && (image->rows != 0))
{
if ((width < image->columns) && (height < image->rows))
low=draw_info->pointsize+0.5;
else
high=draw_info->pointsize-0.5;
}
else
if (((image->columns != 0) && (width < image->columns)) ||
((image->rows != 0) && (height < image->rows)))
low=draw_info->pointsize+0.5;
else
high=draw_info->pointsize-0.5;
}
draw_info->pointsize=floor((low+high)/2.0-0.5);
}
/*
Draw caption.
*/
i=FormatMagickCaption(image,draw_info,split,&metrics,&caption);
(void) CloneString(&draw_info->text,caption);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+g%+g",MagickMax(
draw_info->direction == RightToLeftDirection ? image->columns-
metrics.bounds.x2 : -metrics.bounds.x1,0.0),draw_info->gravity ==
UndefinedGravity ? metrics.ascent : 0.0);
draw_info->geometry=AcquireString(geometry);
status=AnnotateImage(image,draw_info);
if (image_info->pointsize == 0.0)
{
char
pointsize[MaxTextExtent];
(void) FormatLocaleString(pointsize,MaxTextExtent,"%.20g",
draw_info->pointsize);
(void) SetImageProperty(image,"caption:pointsize",pointsize);
}
draw_info=DestroyDrawInfo(draw_info);
caption=DestroyString(caption);
if (status == MagickFalse)
{
image=DestroyImageList(image);
return((Image *) NULL);
}
return(GetFirstImageInList(image));
}
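/*
  Illustrative usage (added; not part of the original file): when no
  explicit pointsize is given, the reader auto-fits the text using the
  doubling search followed by the bisection above.  Hypothetical command
  lines:
    convert -size 320x240 caption:'The quick brown fox' fit.png
    convert -size 320x caption:'The quick brown fox' grow.png
  In the first, both dimensions are fixed and the pointsize is bisected
  until the text just fits; in the second, rows is 0, so the image height
  is computed from the number of formatted lines.
*/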
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e g i s t e r C A P T I O N I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RegisterCAPTIONImage() adds attributes for the CAPTION image format to
% the list of supported formats. The attributes include the image format
% tag, a method to read and/or write the format, whether the format
% supports the saving of more than one frame to the same file or blob,
% whether the format supports native in-memory I/O, and a brief
% description of the format.
%
% The format of the RegisterCAPTIONImage method is:
%
% size_t RegisterCAPTIONImage(void)
%
*/
ModuleExport size_t RegisterCAPTIONImage(void)
{
MagickInfo
*entry;
entry=SetMagickInfo("CAPTION");
entry->decoder=(DecodeImageHandler *) ReadCAPTIONImage;
entry->description=ConstantString("Caption");
entry->adjoin=MagickFalse;
entry->module=ConstantString("CAPTION");
(void) RegisterMagickInfo(entry);
return(MagickImageCoderSignature);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U n r e g i s t e r C A P T I O N I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnregisterCAPTIONImage() removes format registrations made by the
% CAPTION module from the list of supported formats.
%
% The format of the UnregisterCAPTIONImage method is:
%
% UnregisterCAPTIONImage(void)
%
*/
ModuleExport void UnregisterCAPTIONImage(void)
{
(void) UnregisterMagickInfo("CAPTION");
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_4815_1
crossvul-cpp_data_bad_946_1 |
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% OOO PPPP EEEE RRRR AA TTTTT III OOO N N %
% O O P P E R R A A T I O O NN N %
% O O PPPP EEE RRRR AAAA T I O O N N N %
% O O P E R R A A T I O O N NN %
% OOO P EEEE R RR A A T III OOO N N %
% %
% %
% CLI Magick Option Methods %
% %
% Dragon Computing %
% Anthony Thyssen %
% September 2011 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Apply the given options (settings, and simple or sequence operations) to
% the given image(s) according to the current "image_info", "draw_info", and
% "quantize_info" settings, stored in a special CLI Image Wand.
%
% The final goal is to allow execution in a strict one-option-at-a-time
% manner, as is needed for 'pipelining and file scripting' of options in
% IMv7.
%
% Anthony Thyssen, September 2011
*/
/*
Include declarations.
*/
#include "MagickWand/studio.h"
#include "MagickWand/MagickWand.h"
#include "MagickWand/magick-wand-private.h"
#include "MagickWand/mogrify.h"
#include "MagickWand/operation.h"
#include "MagickWand/wand.h"
#include "MagickWand/wandcli.h"
#include "MagickWand/wandcli-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/image-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer-private.h"
/*
Constant declaration.
*/
static const char
MogrifyAlphaColor[] = "#bdbdbd", /* slightly darker gray */
MogrifyBackgroundColor[] = "#fff", /* white */
MogrifyBorderColor[] = "#dfdfdf"; /* sRGB gray */
/*
Define declarations.
*/
#define USE_WAND_METHODS 1
#define MAX_STACK_DEPTH 32
#define UNDEFINED_COMPRESSION_QUALITY 0UL
/* FUTURE: why is this default so specific? */
#define DEFAULT_DISSIMILARITY_THRESHOLD "0.31830988618379067154"
/* For Debugging Geometry Input */
#define ReportGeometry(flags,info) \
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n", \
flags, info.rho, info.sigma, info.xi, info.psi )
/*
** Function to report on the progress of image operations
*/
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MagickPathExtent],
tag[MagickPathExtent];
const char
*locale_message;
register char
*p;
magick_unreferenced(client_data);
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MagickPathExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MagickPathExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
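/*
** Illustrative sketch (added; not part of the original file): one way a
** progress monitor like the above could be attached, assuming MagickCore's
** SetImageInfoProgressMonitor() API. How this file actually wires up
** MonitorProgress (via the "-monitor" option) lies outside this excerpt.
*/
#if 0
static void example_enable_progress(ImageInfo *image_info)
{
  (void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
    (void *) NULL);
}
#endif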
/*
** GetImageCache() will read an image into an image cache if not already
** present, then return the image that is in the cache under that filename.
*/
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MagickPathExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
if (path != (const char *) NULL)
(void) CopyMagickString(read_info->filename,path,MagickPathExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
/*
  SparseColorOption() parses the complex -sparse-color argument into
  an array of floating point values, then calls SparseColorImage().
  Argument is a complex mix of floating-point pixel coordinates, and color
specifications (or direct floating point numbers). The number of floats
needed to represent a color varies depending on the current channel
setting.
This really should be in MagickCore, so that other API's can make use of it.
*/
static Image *SparseColorOption(const Image *image,
const SparseColorMethod method,const char *arguments,ExceptionInfo *exception)
{
char
token[MagickPathExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
PixelInfo
color;
MagickBooleanType
error;
register size_t
x;
size_t
number_arguments,
number_colors;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
    Limit channels according to the image and
    add up the number of values needed per color.
*/
number_colors=0;
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
number_colors++;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
number_colors++;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
number_colors++;
/*
    Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' )
x += number_colors; /* color argument found */
else
x++; /* floating point argument */
}
/* control points and color values */
if ((x % (2+number_colors)) != 0)
{
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","'%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
error=MagickFalse;
number_arguments=x;
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
"MemoryAllocationFailed","%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of X-coord");
error=MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "'%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error=MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color name or function given in string argument */
token[0]=','; while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryColorCompliance(token,AllCompliance,&color,
exception);
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.red;
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.green;
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
sparse_arguments[x++] = QuantumScale*color.blue;
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
sparse_arguments[x++] = QuantumScale*color.black;
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
sparse_arguments[x++] = QuantumScale*color.alpha;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
(image->colorspace == CMYKColorspace))
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
image->alpha_trait != UndefinedPixelTrait)
{
while ( token[0] == ',' ) GetNextToken(p,&p,MagickPathExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
if (error != MagickFalse)
{
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return((Image *) NULL);
}
if (number_arguments != x)
{
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","'%s': %s","sparse-color","Argument Parsing Error");
return((Image *) NULL);
}
/* Call the Sparse Color Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,method,number_arguments,sparse_arguments,
exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
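/*
** Worked example (added; not part of the original file) of the parsing
** above: for an RGB image with all channels active (number_colors == 3),
** the argument string "30,10 red 60,60 blue" expands to two control points
** of 2+3 doubles each, i.e. the flat array
** { 30, 10, 1, 0, 0, 60, 60, 0, 0, 1 }
** (color components QuantumScale-normalized), which is what is handed to
** SparseColorImage().
*/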
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C L I S e t t i n g O p t i o n I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLISettingOptionInfo() applies a single settings option into a CLI wand
% holding the image_info, draw_info, quantize_info structures that will be
% used when processing the images.
%
% These options do no require images to be present in the CLI wand for them
% to be able to be set, in which case they will generally be applied to image
% that are read in later
%
% Options handled by this function are listed in CommandOptions[] of
% "option.c" that is one of "SettingOptionFlags" option flags.
%
% The format of the CLISettingOptionInfo method is:
%
% void CLISettingOptionInfo(MagickCLI *cli_wand,
% const char *option, const char *arg1, const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings to be applied
%
% o option: The option string to be set
%
% o arg1, arg2: optional argument strings to the operation
% arg2 is currently only used by "-limit"
%
*/
WandPrivate void CLISettingOptionInfo(MagickCLI *cli_wand,
const char *option,const char *arg1n, const char *arg2n)
{
ssize_t
parse; /* option argument parsing (string to value table lookup) */
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _image (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define IfSetOption (*option=='-')
#define ArgBoolean IfSetOption ? MagickTrue : MagickFalse
#define ArgBooleanNot IfSetOption ? MagickFalse : MagickTrue
#define ArgBooleanString (IfSetOption?"true":"false")
#define ArgOption(def) (IfSetOption?arg1:(const char *)(def))
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- Setting Option: %s \"%s\" \"%s\"", option,arg1n,arg2n);
arg1 = arg1n,
arg2 = arg2n;
#if 1
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
#endif
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
_image_info->adjoin = ArgBoolean;
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
CLIWandWarnReplaced("-draw 'affine ...'");
if (IfSetOption)
(void) ParseAffineGeometry(arg1,&_draw_info->affine,_exception);
else
GetAffineMatrix(&_draw_info->affine);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
_image_info->antialias =
_draw_info->stroke_antialias =
_draw_info->text_antialias = ArgBoolean;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption("1.0"));
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
_image_info only used directly for generating new images.
SyncImageSettings() used to set per-image attribute.
FUTURE: if _image_info->background_color is not set then
we should fall back to per-image background_color
At this time -background will 'wipe out' the per-image
background color!
Better error handling of QueryColorCompliance() needed.
*/
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption(MogrifyBackgroundColor),AllCompliance,
&_image_info->background_color,_exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
/* FUTURE: bias OBSOLETED, replaced by Artifact "convolve:bias"
as it is actually rarely used except in direct convolve operations
             Usage outside a direct convolve operation is actually non-sensible!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,"convolve:bias",ArgOption(NULL));
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
/* Used as a image chromaticity setting
SyncImageSettings() used to set per-image attribute.
*/
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
Used by many coders including PNG
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
SyncImageSettings() used to set per-image attribute.
Better error checking of QueryColorCompliance().
*/
if (IfSetOption)
{
(void) SetImageOption(_image_info,option+1,arg1);
(void) QueryColorCompliance(arg1,AllCompliance,
&_image_info->border_color,_exception);
(void) QueryColorCompliance(arg1,AllCompliance,
&_draw_info->border_color,_exception);
break;
}
(void) DeleteImageOption(_image_info,option+1);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&_image_info->border_color,_exception);
(void) QueryColorCompliance(MogrifyBorderColor,AllCompliance,
&_draw_info->border_color,_exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
CLIWandWarnReplaced("-undercolor");
CLISettingOptionInfo(cli_wand,"-undercolor",arg1, arg2);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",arg1) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(arg1,100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
          /* Setting used for new images via AcquireImage()
But also used as a SimpleImageOperator
Undefined colorspace means don't modify images on
             read or as an operation */
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
_image_info->colorspace=(ColorspaceType) parse;
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
/* FUTURE: _image_info should be used,
SyncImageSettings() used to set per-image attribute. - REMOVE
This setting should NOT be used to set image 'compose'
"-layer" operators shoud use _image_info if defined otherwise
they should use a per-image compose setting.
*/
parse = ParseCommandOption(MagickComposeOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedComposeOperator",
option,arg1);
_image_info->compose=(CompositeOperator) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
/* FUTURE: What should be used? _image_info or ImageOption ???
             The former is more efficient, but Cristy prefers the latter!
SyncImageSettings() used to set per-image attribute.
             The coders appear to use _image_info, not Image_Option
however the image attribute (for save) is set from the
ImageOption!
Note that "undefined" is a different setting to "none".
*/
parse = ParseCommandOption(MagickCompressOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageCompression",
option,arg1);
_image_info->compression=(CompressionType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1=ArgOption("none");
parse = ParseCommandOption(MagickLogEventOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEventType",
option,arg1);
(void) SetLogEventMask(arg1);
_image_info->debug=IsEventLogging(); /* extract logging*/
cli_wand->wand.debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (LocaleNCompare(arg1,"registry:",9) == 0)
{
if (IfSetOption)
(void) DefineImageRegistry(StringRegistryType,arg1+9,_exception);
else
(void) DeleteImageRegistry(arg1+9);
break;
}
/* DefineImageOption() equals SetImageOption() but with '=' */
if (IfSetOption)
(void) DefineImageOption(_image_info,arg1);
else if (DeleteImageOption(_image_info,arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"NoSuchOption",option,arg1);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
/* Only used for new images via AcquireImage()
FUTURE: Option should also be used for "-morph" (color morphing)
*/
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/* FUTURE: strings used in _image_info attr and _draw_info!
             Basically as density can be in an XxY form!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) CloneString(&_image_info->density,ArgOption(NULL));
(void) CloneString(&_draw_info->density,_image_info->density);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
          /* This is also a SimpleImageOperator! for 8->16 value trunc !!!!
SyncImageSettings() used to set per-image attribute.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->depth=IfSetOption?StringToUnsignedLong(arg1)
:MAGICKCORE_QUANTUM_DEPTH;
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
/* Image Option is only used to set _draw_info */
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickDirectionOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedDirectionType",
option,arg1);
_draw_info->direction=(DirectionType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&_image_info->server_name,ArgOption(NULL));
(void) CloneString(&_draw_info->server_name,_image_info->server_name);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
/* only used in setting new images */
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickDisposeOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedDisposeMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption("undefined"));
break;
}
if (LocaleCompare("dissimilarity-threshold",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
arg1=ArgOption(DEFAULT_DISSIMILARITY_THRESHOLD);
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
/* _image_info attr (on/off), _quantize_info attr (on/off)
but also ImageInfo and _quantize_info method!
FUTURE: merge the duality of the dithering options
*/
_image_info->dither = ArgBoolean;
(void) SetImageOption(_image_info,option+1,ArgOption("none"));
_quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,ArgOption("none"));
if (_quantize_info->dither_method == NoDitherMethod)
_image_info->dither = MagickFalse;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&_draw_info->encoding,ArgOption("undefined"));
(void) SetImageOption(_image_info,option+1,_draw_info->encoding);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
/* Both _image_info attr and ImageInfo */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickEndianOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEndianType",
option,arg1);
/* FUTURE: check alloc/free of endian string! - remove? */
          _image_info->endian=(EndianType) parse; /* use the parsed enum, not the first character */
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
(void) CloneString(&_image_info->extract,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
(void) CloneString(&_draw_info->family,ArgOption(NULL));
break;
}
if (LocaleCompare("features",option+1) == 0)
{
(void) SetImageOption(_image_info,"identify:features",
ArgBooleanString);
if (IfSetOption)
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
/* Set "fill" OR "fill-pattern" in _draw_info
The original fill color is preserved if a fill-pattern is given.
             That way it does not affect other operations that directly use
             the fill color and it can be restored using "+tile".
*/
MagickBooleanType
status;
ExceptionInfo
*sans;
PixelInfo
color;
arg1 = ArgOption("none"); /* +fill turns it off! */
(void) SetImageOption(_image_info,option+1,arg1);
if (_draw_info->fill_pattern != (Image *) NULL)
_draw_info->fill_pattern=DestroyImage(_draw_info->fill_pattern);
/* is it a color or a image? -- ignore exceptions */
sans=AcquireExceptionInfo();
status=QueryColorCompliance(arg1,AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
_draw_info->fill_pattern=GetImageCache(_image_info,arg1,_exception);
else
_draw_info->fill=color;
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickFilterOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageFilter",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
(void) CloneString(&_draw_info->font,ArgOption(NULL));
(void) CloneString(&_image_info->font,_draw_info->font);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
/* FUTURE: why the ping test, you could set ping after this! */
/*
register const char
*q;
for (q=strchr(arg1,'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
_image_info->ping=MagickFalse;
*/
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
/* Option used to set image fuzz! unless blank canvas (from color)
Image attribute used for color compare operations
SyncImageSettings() used to set per-image attribute.
FUTURE: Can't find anything else using _image_info->fuzz directly!
convert structure attribute to 'option' string
*/
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->fuzz=StringToDoubleInterval(arg1,(double)
QuantumRange+1.0);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
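/* Example: StringToDoubleInterval() maps a percent suffix onto the given
interval, so "-fuzz 10%" yields 0.1*((double) QuantumRange+1.0) while a
plain "-fuzz 655" is used as-is. (Sketch only; the absolute value depends
on the build-time Quantum depth.)
*/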
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("none");
parse = ParseCommandOption(MagickGravityOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedGravityType",
option,arg1);
_draw_info->gravity=(GravityType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
SyncImageSettings() used to set per-image attribute.
Used directly by many coders
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,"compare:highlight-color",
ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityType",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
/* Only used by coders: MIFF, MPC, BMP, PNG
and for image profile call to AcquireTransformThreadSet()
SyncImageSettings() used to set per-image attribute.
*/
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickIntentOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntentType",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
/* _image_info is directly used by coders (so why an image setting?)
SyncImageSettings() used to set per-image attribute.
*/
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickInterlaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedInterlaceType",
option,arg1);
_image_info->interlace=(InterlaceType) parse;
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
_draw_info->interline_spacing=StringToDouble(ArgOption("0"),
(char **) NULL);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1 = ArgOption("undefined");
parse = ParseCommandOption(MagickInterpolateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedInterpolateMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
_draw_info->interword_spacing=StringToDouble(ArgOption("0"),(char **) NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_draw_info->kerning=StringToDouble(ArgOption("0"),(char **) NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
/* only used for new images - not in SyncImageOptions() */
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
parse= ParseCommandOption(MagickResourceOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedResourceType",
option,arg1);
if (LocaleCompare("unlimited",arg2) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(arg2,100.0);
(void) SetMagickResourceLimit((ResourceType)parse,limit);
break;
}
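/* Example: "-limit memory 2GiB" resolves "memory" via
MagickResourceOptions and converts "2GiB" with SiPrefixToDoubleInterval(),
which accepts both SI ("2GB") and binary ("2GiB") suffixes; the literal
"unlimited" skips the conversion and leaves MagickResourceInfinity in
place.
*/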
if (LocaleCompare("log",option+1) == 0)
{
if (IfSetOption) {
if ((strchr(arg1,'%') == (char *) NULL))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetLogFormat(arg1);
}
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,"compare:lowlight-color",
ArgOption(NULL));
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("mattecolor",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption(MogrifyAlphaColor),
AllCompliance,&_image_info->matte_color,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
parse=ParseCommandOption(MagickMetricOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedMetricType",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
(void) SetImageOption(_image_info,"identify:moments",
ArgBooleanString);
if (IfSetOption)
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(_image_info, IfSetOption?
MonitorProgress: (MagickProgressMonitor) NULL, (void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
/* Setting (used by some input coders!) -- why?
Warning: This is also Special '-type' SimpleOperator
*/
_image_info->monochrome= ArgBoolean;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
/* Is not used when defining for new images.
This makes it more of an 'operation' than a setting
FUTURE: make this a set meta-data operator instead.
SyncImageSettings() used to set per-image attribute.
*/
parse=ParseCommandOption(MagickOrientationOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageOrientation",
option,arg1);
_image_info->orientation=(OrientationType)parse;
(void) SetImageOption(_image_info,option+1, ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
/* Only used for new images and image generators.
SyncImageSettings() used to set per-image attribute. ?????
That last is WRONG!!!!
FUTURE: adjust named 'page' sizes according to density
*/
char
*canonical_page,
page[MagickPathExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (!IfSetOption)
{
(void) DeleteImageOption(_image_info,option+1);
(void) CloneString(&_image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(_image_info,"page");
if (image_option != (const char *) NULL)
flags=ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(arg1);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MagickPathExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(_image_info,option+1,page);
(void) CloneString(&_image_info->page,page);
break;
}
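/* Example (values illustrative): GetPageGeometry() canonicalizes named
page sizes, so "-page A4+10+10" becomes something like "595x842+10+10"
(A4 at the default 72dpi), which the code above re-formats and stores in
both the "page" option and _image_info->page. The density caveat in the
FUTURE note still applies.
*/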
if (LocaleCompare("ping",option+1) == 0)
{
_image_info->ping = ArgBoolean;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (IfSetOption) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->pointsize =
_draw_info->pointsize =
StringToDouble(arg1,(char **) NULL);
}
else {
_image_info->pointsize=0.0; /* unset pointsize */
_draw_info->pointsize=12.0;
}
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
arg1=ArgOption("-1");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetMagickPrecision(StringToInteger(arg1));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
_image_info->quality= IfSetOption ? StringToUnsignedLong(arg1)
: UNDEFINED_COMPRESSION_QUALITY;
(void) SetImageOption(_image_info,option+1,ArgOption("0"));
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
/* Just a set direct in _quantize_info */
arg1=ArgOption("undefined");
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",
option,arg1);
_quantize_info->colorspace=(ColorspaceType)parse;
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
/* FUTURE: if -quiet is performed twice you cannot do +quiet!
This needs to be checked over thoroughly.
*/
static WarningHandler
warning_handler = (WarningHandler) NULL;
WarningHandler
tmp = SetWarningHandler((WarningHandler) NULL);
if ( tmp != (WarningHandler) NULL)
warning_handler = tmp; /* remember the old handler */
if (!IfSetOption) /* set the old handler */
warning_handler=SetWarningHandler(warning_handler);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
/* Image chromaticity X,Y NB: Y=X if Y not defined
Used by many coders
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("regard-warnings",option+1) == 0)
/* FUTURE: to be replaced by a 'fatal-level' type setting */
break;
if (LocaleCompare("render",option+1) == 0)
{
/* _draw_info only setting */
_draw_info->render= ArgBooleanNot;
break;
}
if (LocaleCompare("respect-parenthesis",option+1) == 0)
{
/* link image and setting stacks - option is itself saved on stack! */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/* FUTURE: should be converted to jpeg:sampling_factor */
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) CloneString(&_image_info->sampling_factor,ArgOption(NULL));
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/* SyncImageSettings() used to set this as a per-image attribute.
What ??? Why ????
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_image_info->scene=StringToUnsignedLong(ArgOption("0"));
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
SetRandomSecretKey(
IfSetOption ? (unsigned long) StringToUnsignedLong(arg1)
: (unsigned long) time((time_t *) NULL));
break;
}
if (LocaleCompare("size",option+1) == 0)
{
/* FUTURE: string in _image_info -- convert to Option ???
Look at the special handling for "size" in SetImageOption()
*/
(void) CloneString(&_image_info->size,ArgOption(NULL));
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickStretchOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedStretchType",
option,arg1);
_draw_info->stretch=(StretchType) parse;
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
/* set stroke color OR stroke-pattern
UPDATE: ensure the stroke color is not destroyed if a pattern
is given, just in case the color is also used for other purposes.
*/
MagickBooleanType
status;
ExceptionInfo
*sans;
PixelInfo
color;
arg1 = ArgOption("none"); /* +fill turns it off! */
(void) SetImageOption(_image_info,option+1,arg1);
if (_draw_info->stroke_pattern != (Image *) NULL)
_draw_info->stroke_pattern=DestroyImage(_draw_info->stroke_pattern);
/* is it a color or an image? -- ignore exceptions */
sans=AcquireExceptionInfo();
status=QueryColorCompliance(arg1,AllCompliance,&color,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
_draw_info->stroke_pattern=GetImageCache(_image_info,arg1,_exception);
else
_draw_info->stroke=color;
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_draw_info->stroke_width=StringToDouble(ArgOption("1.0"),
(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
arg1=ArgOption("undefined");
parse = ParseCommandOption(MagickStyleOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedStyleType",
option,arg1);
_draw_info->style=(StyleType) parse;
break;
}
#if 0
if (LocaleCompare("subimage-search",option+1) == 0)
{
/* FUTURE: this is only used by CompareImages() which is used
only by the "compare" CLI program at this time. */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
#endif
if (LocaleCompare("synchronize",option+1) == 0)
{
/* FUTURE: synchronize to storage - but what does that mean? */
_image_info->synchronize = ArgBoolean;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
/* FUTURE: move _image_info string to option splay-tree
Other than "montage" what uses "texture" ????
*/
(void) CloneString(&_image_info->texture,ArgOption(NULL));
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
_draw_info->fill_pattern=IfSetOption
?GetImageCache(_image_info,arg1,_exception)
:DestroyImage(_draw_info->fill_pattern);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. ??? */
arg1=ArgOption("0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
/* FUTURE: both _image_info attribute & ImageOption in use!
_image_info only used for generating new images.
SyncImageSettings() used to set per-image attribute.
Note that +transparent-color means fall-back to the image
attribute, so the ImageOption is deleted, not set to a default.
*/
if (IfSetOption && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption("none"),AllCompliance,
&_image_info->transparent_color,_exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
_quantize_info->tree_depth=StringToUnsignedLong(ArgOption("0"));
break;
}
if (LocaleCompare("type",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute. */
parse=ParseCommandOption(MagickTypeOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedImageType",
option,arg1);
_image_info->type=(ImageType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
(void) QueryColorCompliance(ArgOption("none"),AllCompliance,
&_draw_info->undercolor,_exception);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute.
Should this effect _draw_info X and Y resolution?
FUTURE: this probably should be part of the density setting
*/
parse=ParseCommandOption(MagickResolutionOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedUnitsType",
option,arg1);
_image_info->units=(ResolutionType) parse;
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
/* FUTURE: Remember all options become image artifacts
_image_info->verbose is only used by coders.
*/
(void) SetImageOption(_image_info,option+1,ArgBooleanString);
_image_info->verbose= ArgBoolean;
_image_info->ping=MagickFalse; /* verbose can't be a ping */
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
/* SyncImageSettings() used to set per-image attribute.
This is VERY deep in the image caching structure.
*/
parse=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
ArgOption("undefined"));
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedVirtualPixelMethod",
option,arg1);
(void) SetImageOption(_image_info,option+1,ArgOption(NULL));
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,arg1);
if (weight == -1)
weight=(ssize_t) StringToUnsignedLong(arg1);
_draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
/* Used as a image chromaticity setting
SyncImageSettings() used to set per-image attribute.
*/
arg1=ArgOption("0.0");
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if ((arg1 && arg1n) && (arg1 != arg1n ))
arg1=DestroyString((char *) arg1);
if ((arg2 && arg2n) && (arg2 != arg2n ))
arg2=DestroyString((char *) arg2);
#undef _image_info
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfSetOption
#undef ArgBoolean
#undef ArgBooleanNot
#undef ArgBooleanString
#undef ArgOption
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I S i m p l e O p e r a t o r I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLISimpleOperatorImages() applies one simple image operation to all the
% images in the CLI wand, using any per-image or global settings that were
% previously saved in the CLI wand.
%
% It is assumed that any such settings are up-to-date.
%
% The format of the CLISimpleOperatorImages method is:
%
% MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,const char *option,
% const char *arg1, const char *arg2,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings and images to be operated on
%
% o option: The option string for the operation
%
% o arg1, arg2: optional argument strings to the operation
%
*/
/*
CLISimpleOperatorImage() is an internal subroutine to apply one simple
image operation to the current image pointed to by the CLI wand.
The image in the list may be modified in three different ways...
* directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
* replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
* one image replaced by a list of images (-separate and -crop only!)
In each case the result replaces the single original image in the list, and
a pointer to the modified image (the last image added, if it was replaced by
a list of images) is returned.
As the image pointed to may be replaced, the first image in the list may
also change. GetFirstImageInList() should be used by the caller if they
wish to obtain the Image pointer to the first image in the list.
*/
static MagickBooleanType CLISimpleOperatorImage(MagickCLI *cli_wand,
const char *option, const char *arg1n, const char *arg2n,
ExceptionInfo *exception)
{
Image *
new_image;
GeometryInfo
geometry_info;
RectangleInfo
geometry;
MagickStatusType
flags;
ssize_t
parse;
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _image (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp IfNormalOp ? MagickTrue : MagickFalse
#define IsPlusOp IfNormalOp ? MagickFalse : MagickTrue
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_image != (Image *) NULL); /* an image must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) LogMagickEvent(WandEvent,GetMagickModule(),"%s",cli_wand->wand.name);
arg1 = arg1n,
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_image,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_image,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
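/* Illustrative sketch: for an operator that interprets escapes, an
argument such as "%wx%h" arrives below as e.g. "640x480" courtesy of
InterpretImageProperties() on the first image; operators flagged
NeverInterpretArgsFlag (file names, passphrases) bypass this expansion.
*/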
#undef _process_flags
#undef _option_type
#if 0
(void) FormatLocaleFile(stderr,
"CLISimpleOperatorImage: \"%s\" \"%s\" \"%s\"\n",option,arg1,arg2);
#endif
new_image = (Image *) NULL; /* the replacement image, if not null at end */
SetGeometryInfo(&geometry_info);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=AdaptiveBlurImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=AdaptiveResizeImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=AdaptiveSharpenImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
parse=ParseCommandOption(MagickAlphaChannelOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedAlphaChannelOption",
option,arg1);
(void) SetImageAlphaChannel(_image,(AlphaChannelOption) parse,
_exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
geometry[MagickPathExtent];
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(arg1,&geometry_info);
if (flags == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
(void) CloneString(&_draw_info->text,arg2);
(void) FormatLocaleString(geometry,MagickPathExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&_draw_info->geometry,geometry);
_draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
_draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
_draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
_draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(_image,_draw_info,_exception);
GetAffineMatrix(&_draw_info->affine);
break;
}
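/* Sketch of the affine set-up above: for "-annotate RxS+X+Y" the two
angles (degrees) rotate the X and Y axes independently, giving
[ sx rx ]   [  cos(R) sin(R) ]
[ ry sy ] = [ -sin(S) cos(S) ]
so R == S is a pure rotation and R != S a shear of the rendered text.
*/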
if (LocaleCompare("auto-gamma",option+1) == 0)
{
(void) AutoGammaImage(_image,_exception);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
(void) AutoLevelImage(_image,_exception);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
new_image=AutoOrientImage(_image,_image->orientation,_exception);
break;
}
if (LocaleCompare("auto-threshold",option+1) == 0)
{
AutoThresholdMethod
method;
method=(AutoThresholdMethod) ParseCommandOption(
MagickAutoThresholdOptions,MagickFalse,arg1);
(void) AutoThresholdImage(_image,method,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) BlackThresholdImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
geometry_info.rho=1.5;
if (IfNormalOp) {
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
}
new_image=BlueShiftImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=BlurImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
if ((flags & (WidthValue | HeightValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
compose=OverCompositeOp;
value=GetImageOption(_image_info,"compose");
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
new_image=BorderImage(_image,&geometry,compose,_exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
GeometryInfo
geometry_info;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImage(_image,brightness,contrast,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=10;
if ((flags & PsiValue) == 0)
geometry_info.psi=30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
new_image=CannyEdgeImage(_image,geometry_info.rho,geometry_info.sigma,
geometry_info.xi,geometry_info.psi,_exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection; /* Note: arguments do not have percent escapes expanded */
/*
Color correct with a color decision list.
*/
color_correction_collection=FileToString(arg1,~0UL,_exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(_image,color_correction_collection,
_exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (IfPlusOp)
{
(void) SetPixelChannelMask(_image,DefaultChannels);
break;
}
parse=ParseChannelOption(arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedChannelType",option,
arg1);
(void) SetPixelChannelMask(_image,(ChannelType) parse);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
new_image=CharcoalImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseGravityGeometry(_image,arg1,&geometry,_exception);
new_image=ChopImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("clahe",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGeometry(arg1,&geometry_info);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
(void) CLAHEImage(_image,geometry.width,geometry.height,
(size_t) geometry.x,geometry_info.psi,_exception);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
{
(void) ClampImage(_image,_exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
if (IfNormalOp)
(void) ClipImage(_image,_exception);
else /* "+mask" remove the write mask */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
Image
*clip_mask;
if (IfPlusOp) {
/* use "+clip-mask" Remove the write mask for -clip-path */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,_exception);
break;
}
clip_mask=GetImageCache(_image_info,arg1,_exception);
if (clip_mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,clip_mask,_exception);
clip_mask=DestroyImage(clip_mask);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) ClipImagePath(_image,arg1,IsNormalOp,_exception);
/* Note: Use "+clip-mask" remove the write mask added */
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ColorizeImage(_image,arg1,&_draw_info->fill,_exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
kernel=AcquireKernelInfo(arg1,exception);
if (kernel == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ColorMatrixImage(_image,kernel,_exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/* Reduce the number of colors in the image.
FUTURE: also provide 'plus version with image 'color counts'
*/
_quantize_info->number_colors=StringToUnsignedLong(arg1);
if (_quantize_info->number_colors == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((_image->storage_class == DirectClass) ||
_image->colors > _quantize_info->number_colors)
(void) QuantizeImage(_quantize_info,_image,_exception);
else
(void) CompressImageColormap(_image,_exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
/* WARNING: this is both an image_info setting (already done)
and an operator to change the image colorspace.
FUTURE: default colorspace should be sRGB!
Unless some type of 'linear colorspace' mode is set.
Note that +colorspace sets "undefined" or no effect on
new images, but forces images already in memory back to RGB!
That seems to be a little strange!
*/
(void) TransformImageColorspace(_image,
IfNormalOp ? _image_info->colorspace : sRGBColorspace,
_exception);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ConnectedComponentsImage(_image,(size_t)
StringToInteger(arg1),(CCObjectInfo **) NULL,_exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
CLIWandWarnReplaced(IfNormalOp?"-level":"+level");
(void) ContrastImage(_image,IsNormalOp,_exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0) {
black_point*=(double) _image->columns*_image->rows/100.0;
white_point*=(double) _image->columns*_image->rows/100.0;
}
white_point=(double) _image->columns*_image->rows-white_point;
(void) ContrastStretchImage(_image,black_point,white_point,
_exception);
break;
}
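/* Example: "-contrast-stretch 2%x1%" clips the darkest 2% and brightest
1% of pixels. Both percentages are first scaled to pixel counts
(columns*rows/100) and white_point is then re-expressed as a count from
the top of the histogram, the form ContrastStretchImage() expects.
*/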
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
kernel_info=AcquireKernelInfo(arg1,exception);
if (kernel_info == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
gamma=0.0;
for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) (kernel_info->width*kernel_info->height); j++)
kernel_info->values[j]*=gamma;
new_image=MorphologyImage(_image,CorrelateMorphology,1,kernel_info,
_exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
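/* Example of the normalization above: a 3x3 kernel of all 1s sums to 9,
so gamma=1/9 and each value becomes 1/9 -- the kernel is scaled to unit
sum before the CorrelateMorphology pass, preserving overall brightness.
A zero-sum kernel (e.g. an edge detector) is left unscaled thanks to the
MagickEpsilon guard.
*/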
if (LocaleCompare("crop",option+1) == 0)
{
/* WARNING: This can generate multiple images! */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=CropImageToTiles(_image,arg1,_exception);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) CycleColormapImage(_image,(ssize_t) StringToLong(arg1),
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
StringInfo
*passkey;
passkey=FileToStringInfo(arg1,~0UL,_exception);
if (passkey == (StringInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) PasskeyDecipherImage(_image,passkey,_exception);
passkey=DestroyStringInfo(passkey);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
/* The _image_info->depth setting has already been set
We just need to apply it to all images in the current sequence
WARNING: Depth from 8 to 16 causes 'quantum rounding' of images!
That is, it really is an operation, not a setting! Arrgghhh
FUTURE: this should not be an operator!!!
*/
(void) SetImageDepth(_image,_image_info->depth,_exception);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
if (IfNormalOp) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0);
}
else
threshold=40.0*QuantumRange/100.0;
new_image=DeskewImage(_image,threshold,_exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
new_image=DespeckleImage(_image,_exception);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
double
*args;
ssize_t
count;
parse = ParseCommandOption(MagickDistortOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedDistortMethod",
option,arg1);
if ((DistortMethod) parse == ResizeDistortion)
{
double
resize_args[2];
/* Special Case - Argument is actually a resize geometry!
** Convert that to an appropriate distortion argument array.
** FUTURE: make a separate special resize operator
*/
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidGeometry",
option,arg2);
(void) ParseRegionGeometry(_image,arg2,&geometry,_exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
new_image=DistortImage(_image,(DistortMethod) parse,
(size_t)2,resize_args,MagickTrue,_exception);
break;
}
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg2,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2);
new_image=DistortImage(_image,(DistortMethod) parse,(size_t)
count,args,IsPlusOp,_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
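/* Example: "-distort Resize 50%" takes the special case above: the
geometry is resolved against the image size (say 640x480 -> 320x240) and
passed to DistortImage() as the two-element array {320.0, 240.0}. Any
other method, e.g. "-distort SRT 45", instead builds its argument list
with StringToArrayOfDoubles().
*/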
if (LocaleCompare("draw",option+1) == 0)
{
(void) CloneString(&_draw_info->primitive,arg1);
(void) DrawImage(_image,_draw_info,_exception);
(void) CloneString(&_draw_info->primitive,(char *) NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=EdgeImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=EmbossImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
StringInfo
*passkey;
passkey=FileToStringInfo(arg1,~0UL,_exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(_image,passkey,_exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
new_image=EnhanceImage(_image,_exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
(void) EqualizeImage(_image,_exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
parse = ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
constant=StringToDoubleInterval(arg2,(double) QuantumRange+1.0);
(void) EvaluateImage(_image,(MagickEvaluateOperator)parse,constant,
_exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGravityGeometry(_image,arg1,&geometry,_exception);
if (geometry.width == 0)
geometry.width=_image->columns;
if (geometry.height == 0)
geometry.height=_image->rows;
new_image=ExtentImage(_image,&geometry,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("flip",option+1) == 0)
{
new_image=FlipImage(_image,_exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
new_image=FlopImage(_image,_exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
PixelInfo
target;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParsePageGeometry(_image,arg1,&geometry,_exception);
(void) QueryColorCompliance(arg2,AllCompliance,&target,_exception);
(void) FloodfillPaintImage(_image,_draw_info,&target,geometry.x,
geometry.y,IsPlusOp,_exception);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* use Over not _image->compose */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=_image->columns+2*frame_info.width;
frame_info.height=_image->rows+2*frame_info.height;
new_image=FrameImage(_image,&frame_info,compose,_exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
double
*args;
ssize_t
count;
parse=ParseCommandOption(MagickFunctionOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",
option,arg1);
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg2,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg2);
(void) FunctionImage(_image,(MagickFunction)parse,(size_t) count,args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
double
constant;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
constant=StringToDouble(arg1,(char **) NULL);
#if 0
/* Using Gamma, via a cache */
if (IfPlusOp)
constant=PerceptibleReciprocal(constant);
(void) GammaImage(_image,constant,_exception);
#else
/* Using Evaluate POW, direct update of values - more accurate */
if (IfNormalOp)
constant=PerceptibleReciprocal(constant);
(void) EvaluateImage(_image,PowEvaluateOperator,constant,_exception);
_image->gamma*=StringToDouble(arg1,(char **) NULL);
#endif
/* Set gamma setting -- Old meaning of "+gamma"
* _image->gamma=StringToDouble(arg1,(char **) NULL);
*/
break;
}
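/* Sketch of the active branch: "-gamma 2.2" evaluates
out = QuantumRange*(in/QuantumRange)^(1/2.2)
via PowEvaluateOperator (the reciprocal is applied for the normal op),
while "+gamma 2.2" applies the power 2.2 directly; _image->gamma is
multiplied either way so the attribute tracks the applied correction.
*/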
if (LocaleCompare("gaussian-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=GaussianBlurImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("gaussian",option+1) == 0)
{
CLIWandWarnReplaced("-gaussian-blur");
(void) CLISimpleOperatorImage(cli_wand,"-gaussian-blur",arg1,NULL,exception);
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record Image offset for composition. (A Setting)
Resize last _image. (ListOperator) -- DEPRECATE
FUTURE: Why, if there is no 'offset', does this resize ALL images?
Also why is the setting recorded in the IMAGE? Nonsense!
*/
if (IfPlusOp)
{ /* remove the previous composition geometry offset! */
if (_image->geometry != (char *) NULL)
_image->geometry=DestroyString(_image->geometry);
break;
}
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&_image->geometry,arg1);
else
new_image=ResizeImage(_image,geometry.width,geometry.height,
_image->filter,_exception);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
parse=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedIntensityMethod",
option,arg1);
(void) GrayscaleImage(_image,(PixelIntensityMethod) parse,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hough-lines",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
new_image=HoughLineImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
const char
*format,
*text;
format=GetImageOption(_image_info,"format");
if (format == (char *) NULL)
{
(void) IdentifyImage(_image,stdout,_image_info->verbose,
_exception);
break;
}
text=InterpretImageProperties(_image_info,_image,format,_exception);
if (text == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
(void) fputs(text,stdout);
text=DestroyString((char *)text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ImplodeImage(_image,geometry_info.rho,_image->interpolate,
_exception);
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/* FUTURE: New to IMv7
Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=InterpolativeResizeImage(_image,geometry.width,
geometry.height,_image->interpolate,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'k':
{
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
new_image=KuwaharaImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=AdaptiveThresholdImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(double) geometry_info.xi,
_exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
double
black_point,
gamma,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(double) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (QuantumRange/100.0);
white_point*=(double) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(double) QuantumRange-black_point;
if (IfPlusOp || ((flags & AspectValue) != 0))
(void) LevelizeImage(_image,black_point,white_point,gamma,_exception);
else
(void) LevelImage(_image,black_point,white_point,gamma,_exception);
break;
}
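/* Sketch of the mapping performed by LevelImage() above, with
B=black_point, W=white_point, G=gamma:
out = QuantumRange*((in-B)/(W-B))^(1/G)
"+level" (or the '!' geometry flag, AspectValue) calls LevelizeImage()
instead, the inverse mapping that compresses values into [B,W].
*/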
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
PixelInfo
black_point,
white_point;
p=(const char *) arg1;
GetNextToken(p,&p,MagickPathExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&black_point,_exception);
else
(void) QueryColorCompliance("#000000",AllCompliance,
&black_point,_exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MagickPathExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MagickPathExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryColorCompliance(token,AllCompliance,
&white_point,_exception);
else
(void) QueryColorCompliance("#ffffff",AllCompliance,
&white_point,_exception);
}
(void) LevelImageColors(_image,&black_point,&white_point,
IsPlusOp,_exception);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
black_point=geometry_info.rho;
white_point=(double) _image->columns*_image->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) _image->columns*_image->rows/100.0;
white_point*=(double) _image->columns*_image->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(double) _image->columns*_image->rows-
black_point;
(void) LinearStretchImage(_image,black_point,white_point,_exception);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseRegionGeometry(_image,arg1,&geometry,_exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
new_image=LiquidRescaleImage(_image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,_exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
MagickStatusType
flags;
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
new_image=LocalContrastImage(_image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
new_image=MagnifyImage(_image,_exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("-remap");
(void) CLISimpleOperatorImage(cli_wand,"-remap",NULL,NULL,exception);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
if (IfPlusOp)
{
/*
Remove a mask.
*/
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
CLIWandWarnReplaced(IfNormalOp?"-alpha Set":"-alpha Off");
(void) SetImageAlphaChannel(_image,IfNormalOp ? SetAlphaChannel :
DeactivateAlphaChannel, _exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=MeanShiftImage(_image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
CLIWandWarnReplaced("-statistic Median");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","Median",arg1,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/* FUTURE: note this is also a special "montage" option */
CLIWandWarnReplaced("-statistic Mode");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","Mode",arg1,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ModulateImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageProgressMonitor(_image, IfNormalOp ? MonitorProgress :
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SetImageType(_image,BilevelType,_exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MagickPathExtent];
const char
*p;
KernelInfo
*kernel;
ssize_t
iterations;
p=arg1;
GetNextToken(p,&p,MagickPathExtent,token);
parse=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedFunction",option,
arg1);
iterations=1L;
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MagickPathExtent,token);
if ((*p != '\0'))
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(arg2,exception);
if (kernel == (KernelInfo *) NULL)
CLIWandExceptArgBreak(OptionError,"UnabletoParseKernel",option,arg2);
new_image=MorphologyImage(_image,(MorphologyMethod)parse,iterations,
kernel,_exception);
kernel=DestroyKernelInfo(kernel);
break;
}
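/* Example: "-morphology Close:2 Diamond" splits arg1 on ':' so that
parse=CloseMorphology and iterations=2, while arg2 ("Diamond") is
compiled by AcquireKernelInfo(); "-morphology Erode Octagon:3" leaves
iterations at the default 1 and sizes the kernel itself instead.
*/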
if (LocaleCompare("motion-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=MotionBlurImage(_image,geometry_info.rho,geometry_info.sigma,
geometry_info.xi,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) NegateImage(_image, IsPlusOp, _exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
double
attenuate;
const char*
value;
if (IfNormalOp)
{
CLIWandWarnReplaced("-statistic NonPeak");
(void) CLISimpleOperatorImage(cli_wand,"-statistic","NonPeak",arg1,exception);
break;
}
parse=ParseCommandOption(MagickNoiseOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedNoiseType",
option,arg1);
attenuate=1.0;
value=GetImageOption(_image_info,"attenuate");
if (value != (const char *) NULL)
attenuate=StringToDouble(value,(char **) NULL);
new_image=AddNoiseImage(_image,(NoiseType)parse,attenuate,
_exception);
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) NormalizeImage(_image,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
PixelInfo
target;
(void) QueryColorCompliance(arg1,AllCompliance,&target,_exception);
(void) OpaquePaintImage(_image,&target,&_draw_info->fill,IsPlusOp,
_exception);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) OrderedDitherImage(_image,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=OilPaintImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
(void) PerceptibleImage(_image,StringToDouble(arg1,(char **) NULL),
_exception);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
const char
*caption;
double
angle;
if (IfPlusOp) {
RandomInfo
*random_info;
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
}
else {
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
angle=geometry_info.rho;
}
caption=GetImageProperty(_image,"caption",_exception);
new_image=PolaroidImage(_image,_draw_info,caption,angle,
_image->interpolate,_exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) PosterizeImage(_image,(size_t) geometry_info.rho,
_quantize_info->dither_method,_exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/* FUTURE: should this be a 'Genesis' option?
The option, however, is also in WandSettingOptionInfo().
Why???
*/
parse=ParseCommandOption(MagickPreviewOptions, MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedPreviewType",
option,arg1);
new_image=PreviewImage(_image,(PreviewType)parse,_exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
/* Note: arguments do not have percent escapes expanded */
if (IfPlusOp)
{ /* Remove a profile from the _image. */
(void) ProfileImage(_image,arg1,(const unsigned char *)
NULL,0,_exception);
break;
}
/* Associate a profile with the _image. */
profile_info=CloneImageInfo(_image_info);
profile=GetImageProfile(_image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,arg1,_exception);
profile_info=DestroyImageInfo(profile_info);
if (profile_image == (Image *) NULL)
{
StringInfo
*profile;
profile_info=CloneImageInfo(_image_info);
(void) CopyMagickString(profile_info->filename,arg1,
MagickPathExtent);
profile=FileToStringInfo(profile_info->filename,~0UL,_exception);
if (profile != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,_exception);
(void) ProfileImage(_image,profile_info->magick,
GetStringInfoDatum(profile),(size_t)
GetStringInfoLength(profile),_exception);
profile=DestroyStringInfo(profile);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(_image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),_exception);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("raise",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
(void) RaiseImage(_image,&geometry,IsNormalOp,_exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
double
min_threshold,
max_threshold;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
min_threshold=0.0;
max_threshold=(double) QuantumRange;
flags=ParseGeometry(arg1,&geometry_info);
min_threshold=geometry_info.rho;
max_threshold=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
max_threshold=min_threshold;
if (strchr(arg1,'%') != (char *) NULL)
{
max_threshold*=(double) (0.01*QuantumRange);
min_threshold*=(double) (0.01*QuantumRange);
}
(void) RandomThresholdImage(_image,min_threshold,max_threshold,
_exception);
break;
}
if (LocaleCompare("range-threshold",option+1) == 0)
{
/*
Range threshold image.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=geometry_info.sigma;
if ((flags & PsiValue) == 0)
geometry_info.psi=geometry_info.xi;
if (strchr(arg1,'%') != (char *) NULL)
{
geometry_info.rho*=(double) (0.01*QuantumRange);
geometry_info.sigma*=(double) (0.01*QuantumRange);
geometry_info.xi*=(double) (0.01*QuantumRange);
geometry_info.psi*=(double) (0.01*QuantumRange);
}
(void) RangeThresholdImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("read-mask",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*mask;
if (IfPlusOp)
{ /* Remove a mask. */
(void) SetImageMask(_image,ReadPixelMask,(Image *) NULL,
_exception);
break;
}
/* Set the image mask. */
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,ReadPixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
CLIWandWarnReplaced("-color-matrix");
(void) CLISimpleOperatorImage(cli_wand,"-color-matrix",arg1,NULL,
exception);
break;
}
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageRegionMask(_image,WritePixelMask,
(const RectangleInfo *) NULL,_exception);
break;
}
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseGravityGeometry(_image,arg1,&geometry,_exception);
(void) SetImageRegionMask(_image,WritePixelMask,&geometry,_exception);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*remap_image;
remap_image=GetImageCache(_image_info,arg1,_exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(_quantize_info,_image,remap_image,_exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
if (IfNormalOp)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
(void) ResetImagePage(_image,arg1);
}
else
(void) ParseAbsoluteGeometry("0x0+0+0",&_image->page);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/* FUTURE: Roll into a resize special operation */
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=ResampleImage(_image,geometry_info.rho,
geometry_info.sigma,_image->filter,_exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ResizeImage(_image,geometry.width,geometry.height,
_image->filter,_exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) _image->columns/100.0;
geometry.y*=(double) _image->rows/100.0;
}
new_image=RollImage(_image,geometry.x,geometry.y,_exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & GreaterValue) != 0 && (_image->columns <= _image->rows))
break;
if ((flags & LessValue) != 0 && (_image->columns >= _image->rows))
break;
new_image=RotateImage(_image,geometry_info.rho,_exception);
break;
}
if (LocaleCompare("rotational-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=RotationalBlurImage(_image,geometry_info.rho,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=SampleImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/* FUTURE: Roll into a resize special operator */
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ScaleImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(_image,_image->colorspace,
_image_info->verbose,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
new_image=SelectiveBlurImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/* WARNING: This can generate multiple images! */
/* FUTURE - this may be replaced by a "-channel" method */
new_image=SeparateImages(_image,_exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=SepiaToneImage(_image,StringToDoubleInterval(arg1,
(double) QuantumRange+1.0),_exception);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if (((flags & RhoValue) == 0) || ((flags & SigmaValue) == 0))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=ShadeImage(_image,IsNormalOp,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
new_image=ShadowImage(_image,geometry_info.rho,geometry_info.sigma,
(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),_exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.0;
new_image=SharpenImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParsePageGeometry(_image,arg1,&geometry,_exception);
new_image=ShaveImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=ShearImage(_image,geometry_info.rho,geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
(void) SigmoidalContrastImage(_image,IsNormalOp,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=SketchImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,_exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) SolarizeImage(_image,StringToDoubleInterval(arg1,(double)
QuantumRange+1.0),_exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
parse= ParseCommandOption(MagickSparseColorOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedSparseColorMethod",
option,arg1);
new_image=SparseColorOption(_image,(SparseColorMethod)parse,arg2,
_exception);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
flags=ParseGravityGeometry(_image,arg1,&geometry,_exception);
new_image=SpliceImage(_image,&geometry,_exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
new_image=SpreadImage(_image,_image->interpolate,geometry_info.rho,
_exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
parse=ParseCommandOption(MagickStatisticOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedStatisticType",
option,arg1);
flags=ParseGeometry(arg2,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg2);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
new_image=StatisticImage(_image,(StatisticType)parse,
(size_t) geometry_info.rho,(size_t) geometry_info.sigma,
_exception);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
(void) StripImage(_image,_exception);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=SwirlImage(_image,geometry_info.rho,
_image->interpolate,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
threshold=(double) QuantumRange/2;
if (IfNormalOp) {
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
threshold=StringToDoubleInterval(arg1,(double) QuantumRange+1.0);
}
(void) BilevelImage(_image,threshold,_exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParseRegionGeometry(_image,arg1,&geometry,_exception);
new_image=ThumbnailImage(_image,geometry.width,geometry.height,
_exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
new_image=TintImage(_image,arg1,&_draw_info->fill,_exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
CLIWandWarnReplaced("+distort AffineProjection");
new_image=AffineTransformImage(_image,&_draw_info->affine,_exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
PixelInfo
target;
(void) QueryColorCompliance(arg1,AllCompliance,&target,_exception);
(void) TransparentPaintImage(_image,&target,(Quantum)
TransparentAlpha,IsPlusOp,_exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
new_image=TransposeImage(_image,_exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
new_image=TransverseImage(_image,_exception);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
new_image=TrimImage(_image,_exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
/* Note that "type" setting should have already been defined */
(void) SetImageType(_image,_image_info->type,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'u':
{
if (LocaleCompare("unique",option+1) == 0)
{
/* FUTURE: move to SyncImageSettings() and AcquireImage()???
Option is not documented, but appears to be for "identify".
We may need an identify-specific verbose!
*/
if (IsPlusOp) {
(void) DeleteImageArtifact(_image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(_image,"identify:unique-colors","true");
(void) SetImageArtifact(_image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
new_image=UniqueImageColors(_image,_exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
new_image=UnsharpMaskImage(_image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
/* FUTURE: move to SyncImageSettings() and AcquireImage()???
three places! ImageArtifact, ImageOption, and _image_info->verbose.
Somehow new images also get this artifact!
*/
(void) SetImageArtifact(_image,option+1,
IfNormalOp ? "true" : "false" );
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*_image->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*_image->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) _image->columns/100.0;
geometry_info.psi*=(double) _image->rows/100.0;
}
new_image=VignetteImage(_image,geometry_info.rho,geometry_info.sigma,
(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & (RhoValue|SigmaValue)) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
new_image=WaveImage(_image,geometry_info.rho,geometry_info.sigma,
_image->interpolate,_exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
new_image=WaveletDenoiseImage(_image,geometry_info.rho,
geometry_info.sigma,_exception);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) WhiteThresholdImage(_image,arg1,_exception);
break;
}
if (LocaleCompare("write-mask",option+1) == 0)
{
/* Note: arguments do not have percent escapes expanded */
Image
*mask;
if (IfPlusOp)
{ /* Remove a mask. */
(void) SetImageMask(_image,WritePixelMask,(Image *) NULL,
_exception);
break;
}
/* Set the image mask. */
mask=GetImageCache(_image_info,arg1,_exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(_image,WritePixelMask,mask,_exception);
mask=DestroyImage(mask);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* Replace current image with any image that was generated
and set the image pointer to the last image (so image->next is correct) */
if (new_image != (Image *) NULL)
ReplaceImageInListReturnLast(&_image,new_image);
return(MagickTrue);
#undef _image_info
#undef _draw_info
#undef _quantize_info
#undef _image
#undef _exception
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
#undef IsPlusOp
}
WandPrivate MagickBooleanType CLISimpleOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1,const char *arg2,ExceptionInfo *exception)
{
#if !USE_WAND_METHODS
size_t
n,
i;
#endif
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(cli_wand->wand.images != (Image *) NULL); /* images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- Simple Operator: %s \"%s\" \"%s\"", option,
arg1 == (const char *) NULL ? "null" : arg1,
arg2 == (const char *) NULL ? "null" : arg2);
#if !USE_WAND_METHODS
/* FUTURE add appropriate tracing */
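/* Walk the image list manually, applying the operator to each image
in turn; the operator may replace the current image in the list. */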
i=0;
n=GetImageListLength(cli_wand->wand.images);
cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images);
while (1) {
i++;
CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception);
if ( cli_wand->wand.images->next == (Image *) NULL )
break;
cli_wand->wand.images=cli_wand->wand.images->next;
}
assert( i == n );
cli_wand->wand.images=GetFirstImageInList(cli_wand->wand.images);
#else
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
(void) CLISimpleOperatorImage(cli_wand, option, arg1, arg2,exception);
MagickResetIterator(&cli_wand->wand);
#endif
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I L i s t O p e r a t o r I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLIListOperatorImages() applies a single operation to the entire image
% list as a whole. The result is often a complete replacement of the image
% list with a completely new list, or with just a single image result.
%
% The format of the CLIListOperatorImages method is:
%
% MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
% const char *option,const char *arg1,const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: structure holding settings to be applied
%
% o option: The option string for the operation
%
% o arg1, arg2: optional argument strings to the operation
% arg2 is currently used by only a few operations (e.g. "-copy")
%
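% For example, a minimal sketch of a direct call (normally this routine is
% reached via CLIOption(), which first looks up the option so that
% cli_wand->command is set; that lookup is assumed to have happened here):
%
% (void) CLIListOperatorImages(cli_wand,"-append",NULL,NULL);
%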
*/
WandPrivate MagickBooleanType CLIListOperatorImages(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
Image
*new_images;
MagickStatusType
status;
ssize_t
parse;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _draw_info (cli_wand->draw_info)
#define _quantize_info (cli_wand->quantize_info)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
#define IsNormalOp (IfNormalOp ? MagickTrue : MagickFalse)
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
assert(_images != (Image *) NULL); /* _images must be present */
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- List Operator: %s \"%s\" \"%s\"", option,
arg1n == (const char *) NULL ? "null" : arg1n,
arg2n == (const char *) NULL ? "null" : arg2n);
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
status=MagickTrue;
new_images=NewImageList();
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("append",option+1) == 0)
{
new_images=AppendImages(_images,IsNormalOp,_exception);
break;
}
if (LocaleCompare("average",option+1) == 0)
{
CLIWandWarnReplaced("-evaluate-sequence Mean");
(void) CLIListOperatorImages(cli_wand,"-evaluate-sequence","Mean",
NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'c':
{
if (LocaleCompare("channel-fx",option+1) == 0)
{
new_images=ChannelFxImage(_images,arg1,_exception);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image;
/* FUTURE - make this a compose option, and thus can be used
with layers compose or even compose last image over all other
_images.
*/
new_images=RemoveFirstImageFromList(&_images);
clut_image=RemoveLastImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (clut_image == (Image *) NULL)
break;
(void) ClutImage(new_images,clut_image,new_images->interpolate,
_exception);
clut_image=DestroyImage(clut_image);
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
new_images=CoalesceImages(_images,_exception);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
parse=(ssize_t) _images->colorspace;
if (_images->number_channels < GetImageListLength(_images))
parse=sRGBColorspace;
if ( IfPlusOp )
parse=ParseCommandOption(MagickColorspaceOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedColorspace",option,
arg1);
new_images=CombineImages(_images,(ColorspaceType) parse,_exception);
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
image=RemoveFirstImageFromList(&_images);
reconstruct_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (reconstruct_image == (Image *) NULL)
{
image=DestroyImage(image);
break;
}
metric=UndefinedErrorMetric;
option=GetImageOption(_image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
new_images=CompareImages(image,reconstruct_image,metric,&distortion,
_exception);
(void) distortion;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
parse=ParseCommandOption(MagickComplexOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=ComplexImages(_images,(ComplexOperator) parse,_exception);
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
CompositeOperator
compose;
const char*
value;
MagickBooleanType
clip_to_self;
Image
*mask_image,
*source_image;
RectangleInfo
geometry;
/* Compose value from "-compose" option only */
value=GetImageOption(_image_info,"compose");
if (value == (const char *) NULL)
compose=OverCompositeOp; /* use Over not source_image->compose */
else
compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,value);
/* Get "clip-to-self" expert setting (false is normal) */
clip_to_self=GetCompositeClipToSelf(compose);
value=GetImageOption(_image_info,"compose:clip-to-self");
if (value != (const char *) NULL)
clip_to_self=IsStringTrue(value);
value=GetImageOption(_image_info,"compose:outside-overlay");
if (value != (const char *) NULL)
clip_to_self=IsStringFalse(value); /* deprecated */
new_images=RemoveFirstImageFromList(&_images);
source_image=RemoveFirstImageFromList(&_images);
if (source_image == (Image *) NULL)
break; /* FUTURE - produce Exception, rather than silent fail */
/* FUTURE - this should not be here! - should be part of -geometry */
if (source_image->geometry != (char *) NULL)
{
RectangleInfo
resize_geometry;
(void) ParseRegionGeometry(source_image,source_image->geometry,
&resize_geometry,_exception);
if ((source_image->columns != resize_geometry.width) ||
(source_image->rows != resize_geometry.height))
{
Image
*resize_image;
resize_image=ResizeImage(source_image,resize_geometry.width,
resize_geometry.height,source_image->filter,_exception);
if (resize_image != (Image *) NULL)
{
source_image=DestroyImage(source_image);
source_image=resize_image;
}
}
}
SetGeometry(source_image,&geometry);
(void) ParseAbsoluteGeometry(source_image->geometry,&geometry);
GravityAdjustGeometry(new_images->columns,new_images->rows,
new_images->gravity, &geometry);
mask_image=RemoveFirstImageFromList(&_images);
if (mask_image == (Image *) NULL)
status&=CompositeImage(new_images,source_image,compose,clip_to_self,
geometry.x,geometry.y,_exception);
else
{
if ((compose == DisplaceCompositeOp) ||
(compose == DistortCompositeOp))
{
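/* For displacement/distortion composition, first copy the mask
into the green channel of the source image. */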
status&=CompositeImage(source_image,mask_image,
CopyGreenCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
}
else
{
Image
*clone_image;
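/* Otherwise composite the source over the destination, copy the
mask into the result's alpha channel, then compose that masked
result back over a clone of the original destination. */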
clone_image=CloneImage(new_images,0,0,MagickTrue,_exception);
if (clone_image == (Image *) NULL)
break;
status&=CompositeImage(new_images,source_image,compose,
clip_to_self,geometry.x,geometry.y,_exception);
status&=CompositeImage(new_images,mask_image,
CopyAlphaCompositeOp,MagickTrue,0,0,_exception);
status&=CompositeImage(clone_image,new_images,OverCompositeOp,
clip_to_self,0,0,_exception);
new_images=DestroyImageList(new_images);
new_images=clone_image;
}
mask_image=DestroyImage(mask_image);
}
source_image=DestroyImage(source_image);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
if (IsGeometry(arg2) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
(void) ParsePageGeometry(_images,arg2,&geometry,_exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=_images;
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,arg1,&geometry,_exception);
(void) CopyImagePixels(_images,source_image,&geometry,&offset,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
CLIWandWarnReplaced("-layer CompareAny");
(void) CLIListOperatorImages(cli_wand,"-layer","CompareAny",NULL);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (IfNormalOp)
DeleteImages(&_images,arg1,_exception);
else
DeleteImages(&_images,"-1",_exception);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (IfNormalOp)
{
const char
*p;
size_t
number_duplicates;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,
arg1);
number_duplicates=(size_t) StringToLong(arg1);
p=strchr(arg1,',');
if (p == (const char *) NULL)
new_images=DuplicateImages(_images,number_duplicates,"-1",
_exception);
else
new_images=DuplicateImages(_images,number_duplicates,p,
_exception);
}
else
new_images=DuplicateImages(_images,1,"-1",_exception);
AppendImageToList(&_images, new_images);
new_images=(Image *) NULL;
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
parse=ParseCommandOption(MagickEvaluateOptions,MagickFalse,arg1);
if (parse < 0)
CLIWandExceptArgBreak(OptionError,"UnrecognizedEvaluateOperator",
option,arg1);
new_images=EvaluateImages(_images,(MagickEvaluateOperator) parse,
_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
new_images=ForwardFourierTransformImage(_images,IsNormalOp,
_exception);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
/* REDIRECTED to use -layers flatten instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
new_images=FxImage(_images,arg1,_exception);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
/* FUTURE - make this a compose option (and thus layers compose )
or perhaps compose last image over all other _images.
*/
Image
*hald_image;
new_images=RemoveFirstImageFromList(&_images);
hald_image=RemoveLastImageFromList(&_images);
if (hald_image == (Image *) NULL)
break;
(void) HaldClutImage(new_images,hald_image,_exception);
hald_image=DestroyImage(hald_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*magnitude_image,
*phase_image;
magnitude_image=RemoveFirstImageFromList(&_images);
phase_image=RemoveFirstImageFromList(&_images);
/* FUTURE - produce Exception, rather than silent fail */
if (phase_image == (Image *) NULL)
break;
new_images=InverseFourierTransformImage(magnitude_image,phase_image,
IsNormalOp,_exception);
magnitude_image=DestroyImage(magnitude_image);
phase_image=DestroyImage(phase_image);
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*insert_image,
*index_image;
ssize_t
index;
if (IfNormalOp && (IsGeometry(arg1) == MagickFalse))
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=0;
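/* Remove the last image and re-insert it at the given index:
0 prepends, the list length appends, anything else inserts after
image 'index-1'; '+insert' (no argument) defaults to a prepend. */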
insert_image=RemoveLastImageFromList(&_images);
if (IfNormalOp)
index=(ssize_t) StringToLong(arg1);
index_image=insert_image;
if (index == 0)
PrependImageToList(&_images,insert_image);
else if (index == (ssize_t) GetImageListLength(_images))
AppendImageToList(&_images,insert_image);
else
{
index_image=GetImageFromList(_images,index-1);
if (index_image == (Image *) NULL)
{
insert_image=DestroyImage(insert_image);
CLIWandExceptArgBreak(OptionError,"NoSuchImage",option,arg1);
}
InsertImageInList(&index_image,insert_image);
}
_images=GetFirstImageInList(index_image);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
parse=ParseCommandOption(MagickLayerOptions,MagickFalse,arg1);
if ( parse < 0 )
CLIWandExceptArgBreak(OptionError,"UnrecognizedLayerMethod",
option,arg1);
switch ((LayerMethod) parse)
{
case CoalesceLayer:
{
new_images=CoalesceImages(_images,_exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
new_images=CompareImagesLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
new_images=MergeImageLayers(_images,(LayerMethod) parse,
_exception);
break;
}
case DisposeLayer:
{
new_images=DisposeImages(_images,_exception);
break;
}
case OptimizeImageLayer:
{
new_images=OptimizeImageLayers(_images,_exception);
break;
}
case OptimizePlusLayer:
{
new_images=OptimizePlusImageLayers(_images,_exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(_images,_exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(&_images,_exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(&_images,_exception);
break;
}
case OptimizeLayer:
{ /* General Purpose, GIF Animation Optimizer. */
new_images=CoalesceImages(_images,_exception);
if (new_images == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=OptimizeImageLayers(new_images,_exception);
if (_images == (Image *) NULL)
break;
new_images=DestroyImageList(new_images);
OptimizeImageTransparency(_images,_exception);
(void) RemapImages(_quantize_info,_images,(Image *) NULL,
_exception);
break;
}
case CompositeLayer:
{
Image
*source;
RectangleInfo
geometry;
CompositeOperator
compose;
const char*
value;
value=GetImageOption(_image_info,"compose");
compose=OverCompositeOp; /* Default to Over */
if (value != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,value);
/* Split image sequence at the first 'NULL:' image. */
source=_images;
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{ /* Separate the two lists, junk the null: image. */
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(_exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
break;
}
/* Adjust offset with gravity and virtual canvas. */
SetGeometry(_images,&geometry);
(void) ParseAbsoluteGeometry(_images->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry(_images->page.width != 0 ?
_images->page.width : _images->columns,
_images->page.height != 0 ? _images->page.height :
_images->rows,_images->gravity,&geometry);
/* Compose the two image sequences together */
CompositeLayers(_images,compose,source,geometry.x,geometry.y,
_exception);
source=DestroyImageList(source);
break;
}
}
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
CLIWandWarnReplaced("+remap");
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
(void) SetImageOption(_image_info,option+1,arg1);
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
morph_image=MorphImages(_images,StringToUnsignedLong(arg1),
_exception);
if (morph_image == (Image *) NULL)
break;
_images=DestroyImageList(_images);
_images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
/* REDIRECTED to use -layers mosaic instead */
(void) CLIListOperatorImages(cli_wand,"-layers",option+1,NULL);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
double
*args;
ssize_t
count;
/* convert argument string into an array of doubles */
args = StringToArrayOfDoubles(arg1,&count,_exception);
if (args == (double *) NULL )
CLIWandExceptArgBreak(OptionError,"InvalidNumberList",option,arg1);
new_images=PolynomialImage(_images,(size_t) (count >> 1),args,
_exception);
args=(double *) RelinquishMagickMemory(args);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
/* FUTURE: better parsing using ScriptToken() from string ??? */
char
**arguments;
int
j,
number_arguments;
arguments=StringToArgv(arg1,&number_arguments);
if (arguments == (char **) NULL)
break;
if (number_arguments < 2)
{
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
if (strchr(arguments[1],'=') != (char *) NULL)
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg1".
*/
assert(arg1 != (const char *) NULL);
length=strlen(arg1);
token=(char *) NULL;
if (~length >= (MagickPathExtent-1))
token=(char *) AcquireQuantumMemory(length+MagickPathExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=arg1;
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&_images,1,&argv,
_exception);
}
token=DestroyString(token);
break;
}
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&_images,
number_arguments-2,(const char **) arguments+2,_exception);
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 'r':
{
if (LocaleCompare("remap",option+1) == 0)
{
(void) RemapImages(_quantize_info,_images,(Image *) NULL,_exception);
break;
}
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(&_images);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
/* FUTURE: this option needs more work to make better */
ssize_t
offset;
if (IsGeometry(arg1) == MagickFalse)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
offset=(ssize_t) StringToLong(arg1);
new_images=SmushImages(_images,IsNormalOp,offset,_exception);
break;
}
if (LocaleCompare("subimage",option+1) == 0)
{
Image
*base_image,
*compare_image;
const char
*value;
MetricType
metric;
double
similarity;
RectangleInfo
offset;
base_image=GetImageFromList(_images,0);
compare_image=GetImageFromList(_images,1);
if ((base_image == (Image *) NULL) || (compare_image == (Image *) NULL))
CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option);
/* Comparison metric */
metric=UndefinedErrorMetric;
value=GetImageOption(_image_info,"metric");
if (value != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,value);
new_images=SimilarityImage(base_image,compare_image,metric,0.0,
&offset,&similarity,_exception);
if (new_images != (Image *) NULL)
{
char
result[MagickPathExtent];
(void) FormatLocaleString(result,MagickPathExtent,"%lf",
similarity);
(void) SetImageProperty(new_images,"subimage:similarity",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.x);
(void) SetImageProperty(new_images,"subimage:x",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,"%+ld",(long)
offset.y);
(void) SetImageProperty(new_images,"subimage:y",result,
_exception);
(void) FormatLocaleString(result,MagickPathExtent,
"%lux%lu%+ld%+ld",(unsigned long) offset.width,(unsigned long)
offset.height,(long) offset.x,(long) offset.y);
(void) SetImageProperty(new_images,"subimage:offset",result,
_exception);
}
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*swap;
ssize_t
index,
swap_index;
index=(-1);
swap_index=(-2);
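/* defaults: '+swap' (no argument) exchanges the last two images
in the list (indexes -1 and -2) */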
if (IfNormalOp) {
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(arg1,&geometry_info);
if ((flags & RhoValue) == 0)
CLIWandExceptArgBreak(OptionError,"InvalidArgument",option,arg1);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(_images,index);
q=GetImageFromList(_images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL)) {
if (IfNormalOp)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1)
else
CLIWandExceptionBreak(OptionError,"TwoOrMoreImagesRequired",option);
}
if (p == q)
CLIWandExceptArgBreak(OptionError,"InvalidImageIndex",option,arg1);
swap=CloneImage(p,0,0,MagickTrue,_exception);
if (swap == (Image *) NULL)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
ReplaceImageInList(&p,CloneImage(q,0,0,MagickTrue,_exception));
ReplaceImageInList(&q,swap);
_images=GetFirstImageInList(q);
break;
}
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
default:
CLIWandExceptionBreak(OptionError,"UnrecognizedOption",option);
}
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
/* if new image list generated, replace existing image list */
if (new_images == (Image *) NULL)
return(status == 0 ? MagickFalse : MagickTrue);
_images=DestroyImageList(_images);
_images=GetFirstImageInList(new_images);
return(status == 0 ? MagickFalse : MagickTrue);
#undef _image_info
#undef _images
#undef _exception
#undef _draw_info
#undef _quantize_info
#undef IfNormalOp
#undef IfPlusOp
#undef IsNormalOp
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I N o I m a g e O p e r a t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLINoImageOperator() applies operations that may not actually need images
% in an image list.
%
% The classic operator of this type is "-read", which actually creates
% images even when no images are present. Another example is the image stack
% operators, which can be applied (push or pop) to an empty image list.
%
% Note that these operators may involve special 'option' prefix characters
% other than '-' or '+', namely parentheses and braces.
%
% The format of the CLINoImageOperator method is:
%
% void CLINoImageOperator(MagickCLI *cli_wand,const char *option,
% const char *arg1, const char *arg2)
%
% A description of each parameter follows:
%
% o cli_wand: the main CLI Wand to use. (sometimes not required)
%
% o option: The special option (with any switch char) to process
%
% o arg1 & arg2: Argument for option, if required
% Currently arg2 is not used.
%
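% For example, a minimal sketch of direct calls (filenames hypothetical;
% normally this routine is reached via CLIOption(), which first sets
% cli_wand->command, and that is assumed to have happened here):
%
% CLINoImageOperator(cli_wand,"-read","input.png",(const char *) NULL);
% CLINoImageOperator(cli_wand,"-write","output.png",(const char *) NULL);
%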
*/
WandPrivate void CLINoImageOperator(MagickCLI *cli_wand,
const char *option,const char *arg1n,const char *arg2n)
{
const char /* percent escaped versions of the args */
*arg1,
*arg2;
#define _image_info (cli_wand->wand.image_info)
#define _images (cli_wand->wand.images)
#define _exception (cli_wand->wand.exception)
#define _process_flags (cli_wand->process_flags)
#define _option_type ((CommandOptionFlags) cli_wand->command->flags)
#define IfNormalOp (*option=='-')
#define IfPlusOp (*option!='-')
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
if (cli_wand->wand.debug != MagickFalse)
(void) CLILogEvent(cli_wand,CommandEvent,GetMagickModule(),
"- NoImage Operator: %s \"%s\" \"%s\"", option,
arg1n != (char *) NULL ? arg1n : "",
arg2n != (char *) NULL ? arg2n : "");
arg1 = arg1n;
arg2 = arg2n;
/* Interpret Percent Escapes in Arguments - using first image */
if ( (((_process_flags & ProcessInterpretProperities) != 0 )
|| ((_option_type & AlwaysInterpretArgsFlag) != 0)
) && ((_option_type & NeverInterpretArgsFlag) == 0) ) {
/* Interpret Percent escapes in argument 1 */
if (arg1n != (char *) NULL) {
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg1=arg1n; /* use the given argument as is */
}
}
if (arg2n != (char *) NULL) {
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
CLIWandException(OptionWarning,"InterpretPropertyFailure",option);
arg2=arg2n; /* use the given argument as is */
}
}
}
#undef _process_flags
#undef _option_type
do { /* break to exit code */
/*
No-op options (ignore these)
*/
if (LocaleCompare("noop",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans0",option+1) == 0) /* zero argument */
break;
if (LocaleCompare("sans1",option+1) == 0) /* one argument */
break;
if (LocaleCompare("sans2",option+1) == 0) /* two arguments */
break;
/*
Image Reading
*/
if ( ( LocaleCompare("read",option+1) == 0 ) ||
( LocaleCompare("--",option) == 0 ) ) {
/* Do Glob filename Expansion for 'arg1' then read all images.
*
* Expansion handles '@', '~', '*', and '?' meta-characters while ignoring
* (but attaching to the filenames in the generated argument list) any
* [...] read modifiers that may be present.
*
* For example: It will expand '*.gif[20x20]' into a list such as
* 'abc.gif[20x20]', 'foobar.gif[20x20]', 'xyzzy.gif[20x20]'
*
* NOTE: In IMv6 this was done globally across all images. This
* meant you could include IM options in '@filename' lists, but you
* could not include comments. Doing it only for image read makes
* it far more secure.
*
* Note: arguments do not have percent escapes expanded for security
* reasons.
*/
int argc;
char **argv;
ssize_t i;
argc = 1;
argv = (char **) &arg1;
/* Expand 'glob' expressions in the given filename.
Expansion handles any 'coder:' prefix, or read modifiers attached
to the filename, including them in the resulting expanded list.
*/
if (ExpandFilenames(&argc,&argv) == MagickFalse)
CLIWandExceptArgBreak(ResourceLimitError,"MemoryAllocationFailed",
option,GetExceptionMessage(errno));
/* loop over the expanded filename list, and read them all in */
for (i=0; i < (ssize_t) argc; i++) {
Image *
new_images;
if (_image_info->ping != MagickFalse)
new_images=PingImages(_image_info,argv[i],_exception);
else
new_images=ReadImages(_image_info,argv[i],_exception);
AppendImageToList(&_images, new_images);
argv[i]=DestroyString(argv[i]);
}
argv=(char **) RelinquishMagickMemory(argv);
break;
}
/*
Image Writing
Note: Writing an empty image list is valid in specific cases
*/
if (LocaleCompare("write",option+1) == 0) {
/* Note: arguments do not have percent escapes expanded */
char
key[MagickPathExtent];
Image
*write_images;
ImageInfo
*write_info;
/* Need images, unless a "null:" output coder is used */
if ( _images == (Image *) NULL ) {
if ( LocaleCompare(arg1,"null:") == 0 )
break;
CLIWandExceptArgBreak(OptionError,"NoImagesForWrite",option,arg1);
}
(void) FormatLocaleString(key,MagickPathExtent,"cache:%s",arg1);
(void) DeleteImageRegistry(key);
write_images=_images;
if (IfPlusOp)
write_images=CloneImageList(_images,_exception);
write_info=CloneImageInfo(_image_info);
(void) WriteImages(write_info,write_images,arg1,_exception);
write_info=DestroyImageInfo(write_info);
if (IfPlusOp)
write_images=DestroyImageList(write_images);
break;
}
/*
Parenthesis and Brace operations
*/
if (LocaleCompare("(",option) == 0) {
/* stack 'push' images */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_list_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"ParenthesisNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.images;
node->next = cli_wand->image_list_stack;
cli_wand->image_list_stack = node;
cli_wand->wand.images = NewImageList();
/* handle respect-parenthesis */
if (IsStringTrue(GetImageOption(cli_wand->wand.image_info,
"respect-parenthesis")) != MagickFalse)
option="{"; /* fall-thru so as to push image settings too */
else
break;
/* fall thru to operation */
}
if (LocaleCompare("{",option) == 0) {
/* stack 'push' of image_info settings */
Stack
*node;
size_t
size;
size=0;
node=cli_wand->image_info_stack;
for ( ; node != (Stack *) NULL; node=node->next)
size++;
if ( size >= MAX_STACK_DEPTH )
CLIWandExceptionBreak(OptionError,"CurlyBracesNestedTooDeeply",option);
node=(Stack *) AcquireMagickMemory(sizeof(*node));
if (node == (Stack *) NULL)
CLIWandExceptionBreak(ResourceLimitFatalError,
"MemoryAllocationFailed",option);
node->data = (void *)cli_wand->wand.image_info;
node->next = cli_wand->image_info_stack;
cli_wand->image_info_stack = node;
cli_wand->wand.image_info = CloneImageInfo(cli_wand->wand.image_info);
if (cli_wand->wand.image_info == (ImageInfo *) NULL) {
CLIWandException(ResourceLimitFatalError,"MemoryAllocationFailed",
option);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
break;
}
break;
}
if (LocaleCompare(")",option) == 0) {
/* pop images from stack */
Stack
*node;
node = (Stack *)cli_wand->image_list_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedParenthesis",option);
cli_wand->image_list_stack = node->next;
AppendImageToList((Image **)&node->data,cli_wand->wand.images);
cli_wand->wand.images= (Image *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
/* handle respect-parenthesis - of the previous 'pushed' settings */
node = cli_wand->image_info_stack;
if ( node != (Stack *) NULL)
{
if (IsStringTrue(GetImageOption(
cli_wand->wand.image_info,"respect-parenthesis")) != MagickFalse)
option="}"; /* fall-thru so as to pop image settings too */
else
break;
}
else
break;
/* fall thru to next if */
}
if (LocaleCompare("}",option) == 0) {
/* pop image_info settings from stack */
Stack
*node;
node = (Stack *)cli_wand->image_info_stack;
if ( node == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnbalancedCurlyBraces",option);
cli_wand->image_info_stack = node->next;
(void) DestroyImageInfo(cli_wand->wand.image_info);
cli_wand->wand.image_info = (ImageInfo *)node->data;
node = (Stack *)RelinquishMagickMemory(node);
GetDrawInfo(cli_wand->wand.image_info, cli_wand->draw_info);
cli_wand->quantize_info=DestroyQuantizeInfo(cli_wand->quantize_info);
cli_wand->quantize_info=AcquireQuantizeInfo(cli_wand->wand.image_info);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
(void) FormatLocaleFile(stdout,"%s",arg1);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
/* Settings are applied to each image in memory in turn (if any),
while an 'option:' setting need only be applied once, globally.
NOTE: arguments have not been automatically percent expanded.
*/
/* escape the 'key' once only, using first image. */
arg1=InterpretImageProperties(_image_info,_images,arg1n,_exception);
if (arg1 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
if (LocaleNCompare(arg1,"registry:",9) == 0)
{
if (IfPlusOp)
{
(void) DeleteImageRegistry(arg1+9);
arg1=DestroyString((char *)arg1);
break;
}
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL) {
arg1=DestroyString((char *)arg1);
CLIWandExceptionBreak(OptionWarning,"InterpretPropertyFailure",
option);
}
(void) SetImageRegistry(StringRegistryType,arg1+9,arg2,_exception);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
if (LocaleNCompare(arg1,"option:",7) == 0)
{
/* delete the equivalent artifact from all images (if any) */
if (_images != (Image *) NULL)
{
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
(void) DeleteImageArtifact(_images,arg1+7);
MagickResetIterator(&cli_wand->wand);
}
/* now set/delete the global option as needed */
/* FUTURE: make escapes in a global 'option:' delayed */
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
(void) SetImageOption(_image_info,arg1+7,arg2);
arg1=DestroyString((char *)arg1);
arg2=DestroyString((char *)arg2);
break;
}
/* Set Artifacts/Properties/Attributes all images (required) */
if ( _images == (Image *) NULL )
CLIWandExceptArgBreak(OptionWarning,"NoImageForProperty",option,arg1);
MagickResetIterator(&cli_wand->wand);
while (MagickNextImage(&cli_wand->wand) != MagickFalse)
{
arg2=(char *) NULL;
if (IfNormalOp)
{
arg2=InterpretImageProperties(_image_info,_images,arg2n,_exception);
if (arg2 == (char *) NULL)
CLIWandExceptionBreak(OptionWarning,
"InterpretPropertyFailure",option);
}
if (LocaleNCompare(arg1,"artifact:",9) == 0)
(void) SetImageArtifact(_images,arg1+9,arg2);
else if (LocaleNCompare(arg1,"property:",9) == 0)
(void) SetImageProperty(_images,arg1+9,arg2,_exception);
else
(void) SetImageProperty(_images,arg1,arg2,_exception);
arg2=DestroyString((char *)arg2);
}
MagickResetIterator(&cli_wand->wand);
arg1=DestroyString((char *)arg1);
break;
}
if (LocaleCompare("clone",option+1) == 0) {
Image
*new_images;
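/* '-clone' copies images, selected by scene geometry, from the
pending image list of the enclosing parenthesis (the top of the
image list stack); '+clone' copies just the last image. */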
if (*option == '+')
arg1=AcquireString("-1");
if (IsSceneGeometry(arg1,MagickFalse) == MagickFalse)
CLIWandExceptionBreak(OptionError,"InvalidArgument",option);
if ( cli_wand->image_list_stack == (Stack *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images = (Image *)cli_wand->image_list_stack->data;
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"UnableToCloneImage",option);
new_images=CloneImages(new_images,arg1,_exception);
if (new_images == (Image *) NULL)
CLIWandExceptionBreak(OptionError,"NoSuchImage",option);
AppendImageToList(&_images,new_images);
break;
}
/*
Informational Operations.
Note that these do not require either a cli-wand or images!
Though currently a cli-wand must be provided regardless.
*/
if (LocaleCompare("version",option+1) == 0)
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("list",option+1) == 0) {
/*
FUTURE: This 'switch' should really be part of MagickCore
*/
ssize_t
list;
list=ParseCommandOption(MagickListOptions,MagickFalse,arg1);
if ( list < 0 ) {
CLIWandExceptionArg(OptionError,"UnrecognizedListType",option,arg1);
break;
}
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,_exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,_exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,_exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,_exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,_exception);
break;
}
case MagickFormatOptions:
(void) ListMagickInfo((FILE *) NULL,_exception);
break;
case MagickLocaleOptions:
(void) ListLocaleInfo((FILE *) NULL,_exception);
break;
case MagickLogOptions:
(void) ListLogInfo((FILE *) NULL,_exception);
break;
case MagickMagicOptions:
(void) ListMagicInfo((FILE *) NULL,_exception);
break;
case MagickMimeOptions:
(void) ListMimeInfo((FILE *) NULL,_exception);
break;
case MagickModuleOptions:
(void) ListModuleInfo((FILE *) NULL,_exception);
break;
case MagickPolicyOptions:
(void) ListPolicyInfo((FILE *) NULL,_exception);
break;
case MagickResourceOptions:
(void) ListMagickResourceInfo((FILE *) NULL,_exception);
break;
case MagickThresholdOptions:
(void) ListThresholdMaps((FILE *) NULL,_exception);
break;
default:
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
_exception);
break;
}
break;
}
CLIWandException(OptionError,"UnrecognizedOption",option);
DisableMSCWarning(4127)
} while (0); /* break to exit code. */
RestoreMSCWarning
/* clean up percent escape interpreted strings */
if (arg1 != arg1n )
arg1=DestroyString((char *)arg1);
if (arg2 != arg2n )
arg2=DestroyString((char *)arg2);
#undef _image_info
#undef _images
#undef _exception
#undef IfNormalOp
#undef IfPlusOp
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C L I O p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CLIOption() processes the given option using the given CLI Magick Wand.
% The option arguments can be variable in number, though at this time no more
% than two are actually used by any option (this may change). Excess
% arguments are simply ignored.
%
% If the cli_wand->command pointer is non-null, then it is assumed that the
% option has already been looked up from the CommandOptions[] table in
% "MagickCore/options.c" using GetCommandOptionInfo(). If not set this
% routine will do the lookup instead. The pointer is reset afterward.
%
% This action allows the caller to look up and pre-handle any 'special'
% options, (such as implicit reads) before calling this general option
% handler to deal with 'standard' command line options.
%
% The format of the CLIOption method is:
%
% void CLIOption(MagickCLI *cli_wand,const char *option, ...)
%
% A description of each parameter follows:
%
% o cli_wand: the main CLI Wand to use.
%
% o option: The special option (with any switch char) to process
%
% o args: any required arguments for an option (variable number)
%
% Example Usage...
%
% CLIoption(cli_wand,"-read","rose:");
% CLIoption(cli_wand,"-virtual-pixel","transparent");
% CLIoption(cli_wand,"-distort","SRT:","30");
% CLIoption(cli_wand,"-write","rotated_rose.png");
%
*/
WandExport void CLIOption(MagickCLI *cli_wand,const char *option,...)
{
const char /* extracted option args from args */
*arg1,
*arg2;
CommandOptionFlags
option_type;
assert(cli_wand != (MagickCLI *) NULL);
assert(cli_wand->signature == MagickWandSignature);
assert(cli_wand->wand.signature == MagickWandSignature);
do { /* Break Code Block for error handling */
/* get information about option */
if ( cli_wand->command == (const OptionInfo *) NULL )
cli_wand->command = GetCommandOptionInfo(option);
#if 0
(void) FormatLocaleFile(stderr, "CLIOption \"%s\" matched \"%s\"\n",
option, cli_wand->command->mnemonic );
#endif
option_type=(CommandOptionFlags) cli_wand->command->flags;
if ( option_type == UndefinedOptionFlag )
CLIWandExceptionReturn(OptionFatalError,"UnrecognizedOption",option);
assert( LocaleCompare(cli_wand->command->mnemonic,option) == 0 );
/* deprecated options */
if ( (option_type & DeprecateOptionFlag) != 0 )
CLIWandExceptionBreak(OptionError,"DeprecatedOptionNoCode",option);
/* options that this module does not handle */
if ((option_type & (SpecialOptionFlag|GenesisOptionFlag)) != 0 )
CLIWandExceptionBreak(OptionFatalError,"InvalidUseOfOption",option);
/* Get argument strings from VarArgs
How can you determine if enough arguments were supplied?
What happens if not enough arguments were supplied?
*/
{ size_t
count = (size_t) cli_wand->command->type;
va_list
operands;
va_start(operands,option);
arg1=arg2=NULL;
if ( count >= 1 )
arg1=(const char *) va_arg(operands, const char *);
if ( count >= 2 )
arg2=(const char *) va_arg(operands, const char *);
va_end(operands);
#if 0
(void) FormatLocaleFile(stderr,
"CLIOption: \"%s\" Count: %ld Flags: %04x Args: \"%s\" \"%s\"\n",
option,(long) count,option_type,arg1,arg2);
#endif
}
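/* Illustrative note (not in the original source): the option's 'type' field
   is used here as its operand count. For example, per the Example Usage
   above, "-distort" takes two operands, so arg1 becomes "SRT:" and arg2
   becomes "30"; a zero-argument option such as "-version" leaves both
   arg1 and arg2 NULL. */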
/*
Call the appropriate option handler
*/
/* FUTURE: this is temporary - get 'settings' to handle distribution of
settings to image attributes, properties, and artifacts */
if ( cli_wand->wand.images != (Image *) NULL )
(void) SyncImagesSettings(cli_wand->wand.image_info,cli_wand->wand.images,
cli_wand->wand.exception);
if ( (option_type & SettingOptionFlags) != 0 ) {
CLISettingOptionInfo(cli_wand, option, arg1, arg2);
/*
FUTURE: Sync Specific Settings into Image Properties (not global)
*/
}
/* Operators that do not need images - read, write, stack, clone */
if ((option_type & NoImageOperatorFlag) != 0)
CLINoImageOperator(cli_wand, option, arg1, arg2);
/* FUTURE: The 'not a setting' check below is a temporary hack due to
* some options being both a Setting and a Simple operator.
* Specifically -monitor, -depth, and -colorspace */
if ( cli_wand->wand.images == (Image *) NULL )
if ( ((option_type & (SimpleOperatorFlag|ListOperatorFlag)) != 0 ) &&
((option_type & SettingOptionFlags) == 0 )) /* temp hack */
CLIWandExceptionBreak(OptionError,"NoImagesFound",option);
/* Operators which loop over individual images, simply */
if ( (option_type & SimpleOperatorFlag) != 0 &&
cli_wand->wand.images != (Image *) NULL) /* temp hack */
{
ExceptionInfo *exception=AcquireExceptionInfo();
(void) CLISimpleOperatorImages(cli_wand, option, arg1, arg2,exception);
exception=DestroyExceptionInfo(exception);
}
/* Operators that work on the image list as a whole */
if ( (option_type & ListOperatorFlag) != 0 )
(void) CLIListOperatorImages(cli_wand, option, arg1, arg2);
DisableMSCWarning(4127)
} while (0); /* end Break code block */
RestoreMSCWarning
cli_wand->command = (const OptionInfo *) NULL; /* prevent re-use later */
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_946_1 |
crossvul-cpp_data_bad_3656_0 | /*
* Generic hugetlb support.
* (C) William Irwin, April 2004
*/
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/node.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
unsigned long hugepages_treat_as_movable;
static int max_hstate;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
__initdata LIST_HEAD(huge_boot_pages);
/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
#define for_each_hstate(h) \
for ((h) = hstates; (h) < &hstates[max_hstate]; (h)++)
/*
* Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
*/
static DEFINE_SPINLOCK(hugetlb_lock);
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
bool free = (spool->count == 0) && (spool->used_hpages == 0);
spin_unlock(&spool->lock);
/* If no pages are used, and no other handles to the subpool
* remain, free the subpool. */
if (free)
kfree(spool);
}
struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
{
struct hugepage_subpool *spool;
spool = kmalloc(sizeof(*spool), GFP_KERNEL);
if (!spool)
return NULL;
spin_lock_init(&spool->lock);
spool->count = 1;
spool->max_hpages = nr_blocks;
spool->used_hpages = 0;
return spool;
}
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
spin_lock(&spool->lock);
BUG_ON(!spool->count);
spool->count--;
unlock_or_release_subpool(spool);
}
static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
long delta)
{
int ret = 0;
if (!spool)
return 0;
spin_lock(&spool->lock);
if ((spool->used_hpages + delta) <= spool->max_hpages) {
spool->used_hpages += delta;
} else {
ret = -ENOMEM;
}
spin_unlock(&spool->lock);
return ret;
}
static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
long delta)
{
if (!spool)
return;
spin_lock(&spool->lock);
spool->used_hpages -= delta;
/* If hugetlbfs_put_super couldn't free spool due to
* an outstanding quota reference, free it now. */
unlock_or_release_subpool(spool);
}
static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
return HUGETLBFS_SB(inode->i_sb)->spool;
}
static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
return subpool_inode(vma->vm_file->f_dentry->d_inode);
}
/*
* Region tracking -- allows tracking of reservations and instantiated pages
* across the pages in a mapping.
*
* The region data structures are protected by a combination of the mmap_sem
* and the hugetlb_instantion_mutex. To access or modify a region the caller
* must either hold the mmap_sem for write, or the mmap_sem for read and
* the hugetlb_instantiation mutex:
*
* down_write(&mm->mmap_sem);
* or
* down_read(&mm->mmap_sem);
* mutex_lock(&hugetlb_instantiation_mutex);
*/
struct file_region {
struct list_head link;
long from;
long to;
};
static long region_add(struct list_head *head, long f, long t)
{
struct file_region *rg, *nrg, *trg;
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
/* Check for and consume any regions we now overlap with. */
nrg = rg;
list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
break;
/* If this area reaches higher, then extend our area to
* include it completely. If this is not the first area
* which we intend to reuse, free it. */
if (rg->to > t)
t = rg->to;
if (rg != nrg) {
list_del(&rg->link);
kfree(rg);
}
}
nrg->from = f;
nrg->to = t;
return 0;
}
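/* Worked example (illustrative, not in the original source): with regions
* [1,3) and [5,7) on the list, region_add(head, 2, 6) rounds f down to 1,
* absorbs [5,7) (extending t to 7), and leaves the single region [1,7).
* Note region_add() assumes a matching region_chg() was called first, so
* a region at or before f is guaranteed to exist. */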
static long region_chg(struct list_head *head, long f, long t)
{
struct file_region *rg, *nrg;
long chg = 0;
/* Locate the region we are before or in. */
list_for_each_entry(rg, head, link)
if (f <= rg->to)
break;
/* If we are below the current region then a new region is required.
* Subtle, allocate a new region at the position but make it zero
* size such that we can guarantee to record the reservation. */
if (&rg->link == head || t < rg->from) {
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
if (!nrg)
return -ENOMEM;
nrg->from = f;
nrg->to = f;
INIT_LIST_HEAD(&nrg->link);
list_add(&nrg->link, rg->link.prev);
return t - f;
}
/* Round our left edge to the current segment if it encloses us. */
if (f > rg->from)
f = rg->from;
chg = t - f;
/* Check for and consume any regions we now overlap with. */
list_for_each_entry(rg, rg->link.prev, link) {
if (&rg->link == head)
break;
if (rg->from > t)
return chg;
/* We overlap with this area; if it extends further than
* us, then we must extend ourselves. Account for its
* existing reservation. */
if (rg->to > t) {
chg += rg->to - t;
t = rg->to;
}
chg -= rg->to - rg->from;
}
return chg;
}
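/* Worked example (illustrative, not in the original source): with only
* [1,3) on the list, region_chg(head, 2, 6) rounds f down to 1, charges
* t - f = 5, then subtracts the 2 pages already covered by [1,3) and
* returns 3 -- the number of new huge pages the reservation needs.
* Nothing is committed until the later region_add(). */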
static long region_truncate(struct list_head *head, long end)
{
struct file_region *rg, *trg;
long chg = 0;
/* Locate the region we are either in or before. */
list_for_each_entry(rg, head, link)
if (end <= rg->to)
break;
if (&rg->link == head)
return 0;
/* If we are in the middle of a region then adjust it. */
if (end > rg->from) {
chg = rg->to - end;
rg->to = end;
rg = list_entry(rg->link.next, typeof(*rg), link);
}
/* Drop any remaining regions. */
list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
if (&rg->link == head)
break;
chg += rg->to - rg->from;
list_del(&rg->link);
kfree(rg);
}
return chg;
}
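/* Worked example (illustrative, not in the original source): with regions
* [0,4) and [6,10) on the list, region_truncate(head, 2) trims the first
* region to [0,2), drops [6,10) entirely, and returns 2 + 4 = 6, the
* number of reserved pages released. */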
static long region_count(struct list_head *head, long f, long t)
{
struct file_region *rg;
long chg = 0;
/* Locate each segment we overlap with, and count that overlap. */
list_for_each_entry(rg, head, link) {
long seg_from;
long seg_to;
if (rg->to <= f)
continue;
if (rg->from >= t)
break;
seg_from = max(rg->from, f);
seg_to = min(rg->to, t);
chg += seg_to - seg_from;
}
return chg;
}
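/* Worked example (illustrative, not in the original source): with regions
* [0,4) and [6,10), region_count(head, 2, 8) counts the overlaps [2,4)
* and [6,8) and returns 4. */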
/*
* Convert the address within this vma to the page offset within
* the mapping, in pagecache page units; huge pages here.
*/
static pgoff_t vma_hugecache_offset(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
return ((address - vma->vm_start) >> huge_page_shift(h)) +
(vma->vm_pgoff >> huge_page_order(h));
}
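/* Worked example (illustrative, not in the original source): assuming 2 MB
* huge pages (huge_page_shift == 21, huge_page_order == 9 with 4 KB base
* pages), an address 4 MB past vm_start contributes 4 MB >> 21 == 2, and a
* vm_pgoff of 1024 base pages (4 MB into the file) contributes
* 1024 >> 9 == 2, giving a huge-page cache offset of 4. */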
pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
unsigned long address)
{
return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
/*
* Return the size of the pages allocated when backing a VMA. In the majority
* of cases this will be the same size as that used by the page table entries.
*/
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
struct hstate *hstate;
if (!is_vm_hugetlb_page(vma))
return PAGE_SIZE;
hstate = hstate_vma(vma);
return 1UL << (hstate->order + PAGE_SHIFT);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
/*
* Return the page size being used by the MMU to back a VMA. In the majority
* of cases, the page size used by the kernel matches the MMU size. On
* architectures where it differs, an architecture-specific version of this
* function is required.
*/
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
return vma_kernel_pagesize(vma);
}
#endif
/*
* Flags for MAP_PRIVATE reservations. These are stored in the bottom
* bits of the reservation map pointer, which are always clear due to
* alignment.
*/
#define HPAGE_RESV_OWNER (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
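/* Illustrative sketch (not in the original source): because a resv_map
* pointer is at least word-aligned, its two low bits are always zero and
* can carry the flags above. E.g. storing a map at address ...5000 with
* HPAGE_RESV_OWNER set yields vm_private_data == ...5001; vma_resv_map()
* masks with ~HPAGE_RESV_MASK to recover the pointer, and
* is_vma_resv_set() tests the flag bits. */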
/*
* These helpers are used to track how many pages are reserved for
* faults in a MAP_PRIVATE mapping. Only the process that called mmap()
* is guaranteed to have its future faults succeed.
*
* With the exception of reset_vma_resv_huge_pages() which is called at fork(),
* the reserve counters are updated with the hugetlb_lock held. It is safe
* to reset the VMA at fork() time as it is not in use yet and there is no
* chance of the global counters getting corrupted as a result of the values.
*
* The private mapping reservation is represented in a subtly different
* manner to a shared mapping. A shared mapping has a region map associated
* with the underlying file, this region map represents the backing file
* pages which have ever had a reservation assigned; this persists even
* after the page is instantiated. A private mapping has a region map
* associated with the original mmap which is attached to all VMAs which
* reference it, this region map represents those offsets which have consumed
* reservation, i.e. where pages have been instantiated.
*/
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
return (unsigned long)vma->vm_private_data;
}
static void set_vma_private_data(struct vm_area_struct *vma,
unsigned long value)
{
vma->vm_private_data = (void *)value;
}
struct resv_map {
struct kref refs;
struct list_head regions;
};
static struct resv_map *resv_map_alloc(void)
{
struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
if (!resv_map)
return NULL;
kref_init(&resv_map->refs);
INIT_LIST_HEAD(&resv_map->regions);
return resv_map;
}
static void resv_map_release(struct kref *ref)
{
struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
/* Clear out any active regions before we release the map. */
region_truncate(&resv_map->regions, 0);
kfree(resv_map);
}
static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
if (!(vma->vm_flags & VM_MAYSHARE))
return (struct resv_map *)(get_vma_private_data(vma) &
~HPAGE_RESV_MASK);
return NULL;
}
static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, (get_vma_private_data(vma) &
HPAGE_RESV_MASK) | (unsigned long)map);
}
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
return (get_vma_private_data(vma) & flag) != 0;
}
/* Decrement the reserved pages in the hugepage pool by one */
static void decrement_hugepage_resv_vma(struct hstate *h,
struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_NORESERVE)
return;
if (vma->vm_flags & VM_MAYSHARE) {
/* Shared mappings always use reserves */
h->resv_huge_pages--;
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
/*
* Only the process that called mmap() has reserves for
* private mappings.
*/
h->resv_huge_pages--;
}
}
/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
VM_BUG_ON(!is_vm_hugetlb_page(vma));
if (!(vma->vm_flags & VM_MAYSHARE))
vma->vm_private_data = (void *)0;
}
/* Returns true if the VMA has associated reserve pages */
static int vma_has_reserves(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_MAYSHARE)
return 1;
if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
return 1;
return 0;
}
static void copy_gigantic_page(struct page *dst, struct page *src)
{
int i;
struct hstate *h = page_hstate(src);
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < pages_per_huge_page(h); ) {
cond_resched();
copy_highpage(dst, src);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
void copy_huge_page(struct page *dst, struct page *src)
{
int i;
struct hstate *h = page_hstate(src);
if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
copy_gigantic_page(dst, src);
return;
}
might_sleep();
for (i = 0; i < pages_per_huge_page(h); i++) {
cond_resched();
copy_highpage(dst + i, src + i);
}
}
static void enqueue_huge_page(struct hstate *h, struct page *page)
{
int nid = page_to_nid(page);
list_add(&page->lru, &h->hugepage_freelists[nid]);
h->free_huge_pages++;
h->free_huge_pages_node[nid]++;
}
static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
if (list_empty(&h->hugepage_freelists[nid]))
return NULL;
page = list_entry(h->hugepage_freelists[nid].next, struct page, lru);
list_del(&page->lru);
set_page_refcounted(page);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
return page;
}
static struct page *dequeue_huge_page_vma(struct hstate *h,
struct vm_area_struct *vma,
unsigned long address, int avoid_reserve)
{
struct page *page = NULL;
struct mempolicy *mpol;
nodemask_t *nodemask;
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
unsigned int cpuset_mems_cookie;
retry_cpuset:
cpuset_mems_cookie = get_mems_allowed();
zonelist = huge_zonelist(vma, address,
htlb_alloc_mask, &mpol, &nodemask);
/*
* A child process with MAP_PRIVATE mappings created by its parent
* has no page reserves. This check ensures that reservations are
* not "stolen". The child may still get SIGKILLed
*/
if (!vma_has_reserves(vma) &&
h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
/* If reserves cannot be used, ensure enough pages are in the pool */
if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
goto err;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
MAX_NR_ZONES - 1, nodemask) {
if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask)) {
page = dequeue_huge_page_node(h, zone_to_nid(zone));
if (page) {
if (!avoid_reserve)
decrement_hugepage_resv_vma(h, vma);
break;
}
}
}
mpol_cond_put(mpol);
if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
goto retry_cpuset;
return page;
err:
mpol_cond_put(mpol);
return NULL;
}
static void update_and_free_page(struct hstate *h, struct page *page)
{
int i;
VM_BUG_ON(h->order >= MAX_ORDER);
h->nr_huge_pages--;
h->nr_huge_pages_node[page_to_nid(page)]--;
for (i = 0; i < pages_per_huge_page(h); i++) {
page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
1 << PG_referenced | 1 << PG_dirty |
1 << PG_active | 1 << PG_reserved |
1 << PG_private | 1 << PG_writeback);
}
set_compound_page_dtor(page, NULL);
set_page_refcounted(page);
arch_release_hugepage(page);
__free_pages(page, huge_page_order(h));
}
struct hstate *size_to_hstate(unsigned long size)
{
struct hstate *h;
for_each_hstate(h) {
if (huge_page_size(h) == size)
return h;
}
return NULL;
}
static void free_huge_page(struct page *page)
{
/*
* Can't pass hstate in here because it is called from the
* compound page destructor.
*/
struct hstate *h = page_hstate(page);
int nid = page_to_nid(page);
struct hugepage_subpool *spool =
(struct hugepage_subpool *)page_private(page);
set_page_private(page, 0);
page->mapping = NULL;
BUG_ON(page_count(page));
BUG_ON(page_mapcount(page));
INIT_LIST_HEAD(&page->lru);
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages_node[nid] && huge_page_order(h) < MAX_ORDER) {
update_and_free_page(h, page);
h->surplus_huge_pages--;
h->surplus_huge_pages_node[nid]--;
} else {
enqueue_huge_page(h, page);
}
spin_unlock(&hugetlb_lock);
hugepage_subpool_put_pages(spool, 1);
}
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
set_compound_page_dtor(page, free_huge_page);
spin_lock(&hugetlb_lock);
h->nr_huge_pages++;
h->nr_huge_pages_node[nid]++;
spin_unlock(&hugetlb_lock);
put_page(page); /* free it into the hugepage allocator */
}
static void prep_compound_gigantic_page(struct page *page, unsigned long order)
{
int i;
int nr_pages = 1 << order;
struct page *p = page + 1;
/* we rely on prep_new_huge_page to set the destructor */
set_compound_order(page, order);
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
}
}
int PageHuge(struct page *page)
{
compound_page_dtor *dtor;
if (!PageCompound(page))
return 0;
page = compound_head(page);
dtor = get_compound_page_dtor(page);
return dtor == free_huge_page;
}
EXPORT_SYMBOL_GPL(PageHuge);
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
if (h->order >= MAX_ORDER)
return NULL;
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
if (page) {
if (arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
return NULL;
}
prep_new_huge_page(h, page, nid);
}
return page;
}
/*
* common helper functions for hstate_next_node_to_{alloc|free}.
* We may have allocated or freed a huge page based on a different
* nodes_allowed previously, so h->next_node_to_{alloc|free} might
* be outside of *nodes_allowed. Ensure that we use an allowed
* node for alloc or free.
*/
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
nid = next_node(nid, *nodes_allowed);
if (nid == MAX_NUMNODES)
nid = first_node(*nodes_allowed);
VM_BUG_ON(nid >= MAX_NUMNODES);
return nid;
}
static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
if (!node_isset(nid, *nodes_allowed))
nid = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* returns the previously saved node ["this node"] from which to
* allocate a persistent huge page for the pool and advances the
* next node from which to allocate, handling wrap at end of node
* mask.
*/
static int hstate_next_node_to_alloc(struct hstate *h,
nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
return nid;
}
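/* Worked example (illustrative, not in the original source): with
* nodes_allowed == {0,2} and next_nid_to_alloc == 1, the saved nid is
* first advanced into the mask (1 -> 2), node 2 is returned, and
* next_nid_to_alloc wraps past the end of the mask back to node 0, so
* successive calls alternate 2, 0, 2, 0, ... */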
static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
struct page *page;
int start_nid;
int next_nid;
int ret = 0;
start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
next_nid = start_nid;
do {
page = alloc_fresh_huge_page_node(h, next_nid);
if (page) {
ret = 1;
break;
}
next_nid = hstate_next_node_to_alloc(h, nodes_allowed);
} while (next_nid != start_nid);
if (ret)
count_vm_event(HTLB_BUDDY_PGALLOC);
else
count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
return ret;
}
/*
* helper for free_pool_huge_page() - return the previously saved
* node ["this node"] from which to free a huge page. Advance the
* next node id whether or not we find a free huge page to free so
* that the next attempt to free addresses the next node.
*/
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
int nid;
VM_BUG_ON(!nodes_allowed);
nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
return nid;
}
/*
* Free huge page from pool from next node to free.
* Attempt to keep persistent huge pages more or less
* balanced over allowed nodes.
* Called with hugetlb_lock locked.
*/
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
bool acct_surplus)
{
int start_nid;
int next_nid;
int ret = 0;
start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
/*
* If we're returning unused surplus pages, only examine
* nodes with surplus pages.
*/
if ((!acct_surplus || h->surplus_huge_pages_node[next_nid]) &&
!list_empty(&h->hugepage_freelists[next_nid])) {
struct page *page =
list_entry(h->hugepage_freelists[next_nid].next,
struct page, lru);
list_del(&page->lru);
h->free_huge_pages--;
h->free_huge_pages_node[next_nid]--;
if (acct_surplus) {
h->surplus_huge_pages--;
h->surplus_huge_pages_node[next_nid]--;
}
update_and_free_page(h, page);
ret = 1;
break;
}
next_nid = hstate_next_node_to_free(h, nodes_allowed);
} while (next_nid != start_nid);
return ret;
}
static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
{
struct page *page;
unsigned int r_nid;
if (h->order >= MAX_ORDER)
return NULL;
/*
* Assume we will successfully allocate the surplus page to
* prevent racing processes from causing the surplus to exceed
* overcommit
*
* This however introduces a different race, where a process B
* tries to grow the static hugepage pool while alloc_pages() is
* called by process A. B will only examine the per-node
* counters in determining if surplus huge pages can be
* converted to normal huge pages in adjust_pool_surplus(). A
* won't be able to increment the per-node counter, until the
* lock is dropped by B, but B doesn't drop hugetlb_lock until
* no more huge pages can be converted from surplus to normal
* state (and doesn't try to convert again). Thus, we have a
* case where a surplus huge page exists, the pool is grown, and
* the surplus huge page still exists after, even though it
* should just have been converted to a normal huge page. This
* does not leak memory, though, as the hugepage will be freed
* once it is out of use. It also does not allow the counters to
* go out of whack in adjust_pool_surplus() as we don't modify
* the node values until we've gotten the hugepage and only the
* per-node value is checked there.
*/
spin_lock(&hugetlb_lock);
if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
spin_unlock(&hugetlb_lock);
return NULL;
} else {
h->nr_huge_pages++;
h->surplus_huge_pages++;
}
spin_unlock(&hugetlb_lock);
if (nid == NUMA_NO_NODE)
page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
__GFP_REPEAT|__GFP_NOWARN,
huge_page_order(h));
else
page = alloc_pages_exact_node(nid,
htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
__GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
if (page && arch_prepare_hugepage(page)) {
__free_pages(page, huge_page_order(h));
page = NULL;
}
spin_lock(&hugetlb_lock);
if (page) {
r_nid = page_to_nid(page);
set_compound_page_dtor(page, free_huge_page);
/*
* We incremented the global counters already
*/
h->nr_huge_pages_node[r_nid]++;
h->surplus_huge_pages_node[r_nid]++;
__count_vm_event(HTLB_BUDDY_PGALLOC);
} else {
h->nr_huge_pages--;
h->surplus_huge_pages--;
__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
}
spin_unlock(&hugetlb_lock);
return page;
}
/*
* This allocation function is useful in the context where vma is irrelevant.
* E.g. soft-offlining uses this function because it only cares about the
* physical address of the error page.
*/
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
struct page *page;
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_node(h, nid);
spin_unlock(&hugetlb_lock);
if (!page)
page = alloc_buddy_huge_page(h, nid);
return page;
}
/*
* Increase the hugetlb pool such that it can accommodate a reservation
* of size 'delta'.
*/
static int gather_surplus_pages(struct hstate *h, int delta)
{
struct list_head surplus_list;
struct page *page, *tmp;
int ret, i;
int needed, allocated;
bool alloc_ok = true;
needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
if (needed <= 0) {
h->resv_huge_pages += delta;
return 0;
}
allocated = 0;
INIT_LIST_HEAD(&surplus_list);
ret = -ENOMEM;
retry:
spin_unlock(&hugetlb_lock);
for (i = 0; i < needed; i++) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
alloc_ok = false;
break;
}
list_add(&page->lru, &surplus_list);
}
allocated += i;
/*
* After retaking hugetlb_lock, we need to recalculate 'needed'
* because either resv_huge_pages or free_huge_pages may have changed.
*/
spin_lock(&hugetlb_lock);
needed = (h->resv_huge_pages + delta) -
(h->free_huge_pages + allocated);
if (needed > 0) {
if (alloc_ok)
goto retry;
/*
* We were not able to allocate enough pages to
* satisfy the entire reservation so we free what
* we've allocated so far.
*/
goto free;
}
/*
* The surplus_list now contains _at_least_ the number of extra pages
* needed to accommodate the reservation. Add the appropriate number
* of pages to the hugetlb pool and free the extras back to the buddy
* allocator. Commit the entire reservation here to prevent another
* process from stealing the pages as they are added to the pool but
* before they are reserved.
*/
needed += allocated;
h->resv_huge_pages += delta;
ret = 0;
/* Free the needed pages to the hugetlb pool */
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
if ((--needed) < 0)
break;
list_del(&page->lru);
/*
* This page is now managed by the hugetlb allocator and has
* no users -- drop the buddy allocator's reference.
*/
put_page_testzero(page);
VM_BUG_ON(page_count(page));
enqueue_huge_page(h, page);
}
free:
spin_unlock(&hugetlb_lock);
/* Free unnecessary surplus pages to the buddy allocator */
if (!list_empty(&surplus_list)) {
list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
list_del(&page->lru);
put_page(page);
}
}
spin_lock(&hugetlb_lock);
return ret;
}
/*
* When releasing a hugetlb pool reservation, any surplus pages that were
* allocated to satisfy the reservation must be explicitly freed if they were
* never used.
* Called with hugetlb_lock held.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
/* Uncommit the reservation */
h->resv_huge_pages -= unused_resv_pages;
/* Cannot return gigantic pages currently */
if (h->order >= MAX_ORDER)
return;
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
* We want to release as many surplus pages as possible, spread
* evenly across all nodes with memory. Iterate across these nodes
* until we can no longer free unreserved surplus pages. This occurs
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
*/
while (nr_pages--) {
if (!free_pool_huge_page(h, &node_states[N_HIGH_MEMORY], 1))
break;
}
}
/*
* Determine if the huge page at addr within the vma has an associated
* reservation. Where it does not we will need to logically increase
* reservation and actually increase subpool usage before an allocation
* can occur. Where any new reservation would be required the
* reservation change is prepared, but not committed. Once the page
* has been allocated from the subpool and instantiated the change should
* be committed via vma_commit_reservation. No action is required on
* failure.
*/
static long vma_needs_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
return region_chg(&inode->i_mapping->private_list,
idx, idx + 1);
} else if (!is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
return 1;
} else {
long err;
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
err = region_chg(&reservations->regions, idx, idx + 1);
if (err < 0)
return err;
return 0;
}
}
static void vma_commit_reservation(struct hstate *h,
struct vm_area_struct *vma, unsigned long addr)
{
struct address_space *mapping = vma->vm_file->f_mapping;
struct inode *inode = mapping->host;
if (vma->vm_flags & VM_MAYSHARE) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
region_add(&inode->i_mapping->private_list, idx, idx + 1);
} else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
pgoff_t idx = vma_hugecache_offset(h, vma, addr);
struct resv_map *reservations = vma_resv_map(vma);
/* Mark this page used in the map. */
region_add(&reservations->regions, idx, idx + 1);
}
}
static struct page *alloc_huge_page(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve)
{
struct hugepage_subpool *spool = subpool_vma(vma);
struct hstate *h = hstate_vma(vma);
struct page *page;
long chg;
/*
* Processes that did not create the mapping will have no
* reserves and will not have accounted against the subpool
* limit. Check that the subpool limit can be met before
* satisfying the allocation. MAP_NORESERVE mappings may also
* need pages and subpool limit allocated if no reserve
* mapping overlaps.
*/
chg = vma_needs_reservation(h, vma, addr);
if (chg < 0)
return ERR_PTR(-VM_FAULT_OOM);
if (chg)
if (hugepage_subpool_get_pages(spool, chg))
return ERR_PTR(-VM_FAULT_SIGBUS);
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
spin_unlock(&hugetlb_lock);
if (!page) {
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
hugepage_subpool_put_pages(spool, chg);
return ERR_PTR(-VM_FAULT_SIGBUS);
}
}
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
return page;
}
int __weak alloc_bootmem_huge_page(struct hstate *h)
{
struct huge_bootmem_page *m;
int nr_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
while (nr_nodes) {
void *addr;
addr = __alloc_bootmem_node_nopanic(
NODE_DATA(hstate_next_node_to_alloc(h,
&node_states[N_HIGH_MEMORY])),
huge_page_size(h), huge_page_size(h), 0);
if (addr) {
/*
* Use the beginning of the huge page to store the
* huge_bootmem_page struct (until gather_bootmem
* puts them into the mem_map).
*/
m = addr;
goto found;
}
nr_nodes--;
}
return 0;
found:
BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
/* Put them into a private list first because mem_map is not up yet */
list_add(&m->list, &huge_boot_pages);
m->hstate = h;
return 1;
}
static void prep_compound_huge_page(struct page *page, int order)
{
if (unlikely(order > (MAX_ORDER - 1)))
prep_compound_gigantic_page(page, order);
else
prep_compound_page(page, order);
}
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
struct huge_bootmem_page *m;
list_for_each_entry(m, &huge_boot_pages, list) {
struct hstate *h = m->hstate;
struct page *page;
#ifdef CONFIG_HIGHMEM
page = pfn_to_page(m->phys >> PAGE_SHIFT);
free_bootmem_late((unsigned long)m,
sizeof(struct huge_bootmem_page));
#else
page = virt_to_page(m);
#endif
__ClearPageReserved(page);
WARN_ON(page_count(page) != 1);
prep_compound_huge_page(page, h->order);
prep_new_huge_page(h, page, page_to_nid(page));
/*
* If we had gigantic hugepages allocated at boot time, we need
* to restore the 'stolen' pages to totalram_pages in order to
* fix confusing memory reports from free(1) and other
* side-effects, like CommitLimit going negative.
*/
if (h->order > (MAX_ORDER - 1))
totalram_pages += 1 << h->order;
}
}
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
unsigned long i;
for (i = 0; i < h->max_huge_pages; ++i) {
if (h->order >= MAX_ORDER) {
if (!alloc_bootmem_huge_page(h))
break;
} else if (!alloc_fresh_huge_page(h,
&node_states[N_HIGH_MEMORY]))
break;
}
h->max_huge_pages = i;
}
static void __init hugetlb_init_hstates(void)
{
struct hstate *h;
for_each_hstate(h) {
/* oversize hugepages were init'ed in early boot */
if (h->order < MAX_ORDER)
hugetlb_hstate_alloc_pages(h);
}
}
static char * __init memfmt(char *buf, unsigned long n)
{
if (n >= (1UL << 30))
sprintf(buf, "%lu GB", n >> 30);
else if (n >= (1UL << 20))
sprintf(buf, "%lu MB", n >> 20);
else
sprintf(buf, "%lu KB", n >> 10);
return buf;
}
static void __init report_hugepages(void)
{
struct hstate *h;
for_each_hstate(h) {
char buf[32];
printk(KERN_INFO "HugeTLB registered %s page size, "
"pre-allocated %ld pages\n",
memfmt(buf, huge_page_size(h)),
h->free_huge_pages);
}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
int i;
if (h->order >= MAX_ORDER)
return;
for_each_node_mask(i, *nodes_allowed) {
struct page *page, *next;
struct list_head *freel = &h->hugepage_freelists[i];
list_for_each_entry_safe(page, next, freel, lru) {
if (count >= h->nr_huge_pages)
return;
if (PageHighMem(page))
continue;
list_del(&page->lru);
update_and_free_page(h, page);
h->free_huge_pages--;
h->free_huge_pages_node[page_to_nid(page)]--;
}
}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
}
#endif
/*
* Increment or decrement surplus_huge_pages. Keep node-specific counters
* balanced by operating on them in a round-robin fashion.
* Returns 1 if an adjustment was made.
*/
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
int delta)
{
int start_nid, next_nid;
int ret = 0;
VM_BUG_ON(delta != -1 && delta != 1);
if (delta < 0)
start_nid = hstate_next_node_to_alloc(h, nodes_allowed);
else
start_nid = hstate_next_node_to_free(h, nodes_allowed);
next_nid = start_nid;
do {
int nid = next_nid;
if (delta < 0) {
/*
* To shrink on this node, there must be a surplus page
*/
if (!h->surplus_huge_pages_node[nid]) {
next_nid = hstate_next_node_to_alloc(h,
nodes_allowed);
continue;
}
}
if (delta > 0) {
/*
* Surplus cannot exceed the total number of pages
*/
if (h->surplus_huge_pages_node[nid] >=
h->nr_huge_pages_node[nid]) {
next_nid = hstate_next_node_to_free(h,
nodes_allowed);
continue;
}
}
h->surplus_huge_pages += delta;
h->surplus_huge_pages_node[nid] += delta;
ret = 1;
break;
} while (next_nid != start_nid);
return ret;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
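/* Illustrative example (not in the original source): with nr_huge_pages
* == 10 and surplus_huge_pages == 3, persistent_huge_pages(h) == 7 --
* the pages that remain in the pool independent of overcommit. */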
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
nodemask_t *nodes_allowed)
{
unsigned long min_count, ret;
if (h->order >= MAX_ORDER)
return h->max_huge_pages;
/*
* Increase the pool size
* First take pages out of surplus state. Then make up the
* remaining difference by allocating fresh huge pages.
*
* We might race with alloc_buddy_huge_page() here and be unable
* to convert a surplus huge page to a normal huge page. That is
* not critical, though, it just means the overall size of the
* pool might be one hugepage larger than it needs to be, but
* within all the constraints specified by the sysctls.
*/
spin_lock(&hugetlb_lock);
while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, -1))
break;
}
while (count > persistent_huge_pages(h)) {
/*
* If this allocation races such that we no longer need the
* page, free_huge_page will handle it by freeing the page
* and reducing the surplus.
*/
spin_unlock(&hugetlb_lock);
ret = alloc_fresh_huge_page(h, nodes_allowed);
spin_lock(&hugetlb_lock);
if (!ret)
goto out;
/* Bail for signals. Probably ctrl-c from user */
if (signal_pending(current))
goto out;
}
/*
* Decrease the pool size
* First return free pages to the buddy allocator (being careful
* to keep enough around to satisfy reservations). Then place
* pages into surplus state as needed so the pool will shrink
* to the desired size as pages become free.
*
* By placing pages into the surplus state independent of the
* overcommit value, we are allowing the surplus pool size to
* exceed overcommit. There are few sane options here. Since
* alloc_buddy_huge_page() is checking the global counter,
* though, we'll note that we're not allowed to exceed surplus
* and won't grow the pool anywhere else. Not until one of the
* sysctls are changed, or the surplus pages go out of use.
*/
min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
min_count = max(count, min_count);
try_to_free_low(h, min_count, nodes_allowed);
while (min_count < persistent_huge_pages(h)) {
if (!free_pool_huge_page(h, nodes_allowed, 0))
break;
}
while (count < persistent_huge_pages(h)) {
if (!adjust_pool_surplus(h, nodes_allowed, 1))
break;
}
out:
ret = persistent_huge_pages(h);
spin_unlock(&hugetlb_lock);
return ret;
}
#define HSTATE_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
#define HSTATE_ATTR(_name) \
static struct kobj_attribute _name##_attr = \
__ATTR(_name, 0644, _name##_show, _name##_store)
static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = NUMA_NO_NODE;
return &hstates[i];
}
return kobj_to_node_hstate(kobj, nidp);
}
static ssize_t nr_hugepages_show_common(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long nr_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
nr_huge_pages = h->nr_huge_pages;
else
nr_huge_pages = h->nr_huge_pages_node[nid];
return sprintf(buf, "%lu\n", nr_huge_pages);
}
static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t len)
{
int err;
int nid;
unsigned long count;
struct hstate *h;
NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
err = strict_strtoul(buf, 10, &count);
if (err)
goto out;
h = kobj_to_hstate(kobj, &nid);
if (h->order >= MAX_ORDER) {
err = -EINVAL;
goto out;
}
if (nid == NUMA_NO_NODE) {
/*
* global hstate attribute
*/
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
nodes_allowed = &node_states[N_HIGH_MEMORY];
}
} else if (nodes_allowed) {
/*
* per node hstate attribute: adjust count to global,
* but restrict alloc/free to the specified node.
*/
count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
init_nodemask_of_node(nodes_allowed, nid);
} else
nodes_allowed = &node_states[N_HIGH_MEMORY];
h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
if (nodes_allowed != &node_states[N_HIGH_MEMORY])
NODEMASK_FREE(nodes_allowed);
return len;
out:
NODEMASK_FREE(nodes_allowed);
return err;
}
static ssize_t nr_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(false, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages);
#ifdef CONFIG_NUMA
/*
* hstate attribute for optionally mempolicy-based constraint on persistent
* huge page alloc/free.
*/
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return nr_hugepages_show_common(kobj, attr, buf);
}
static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t len)
{
return nr_hugepages_store_common(true, kobj, attr, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif
static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}
static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
struct kobj_attribute *attr, const char *buf, size_t count)
{
int err;
unsigned long input;
struct hstate *h = kobj_to_hstate(kobj, NULL);
if (h->order >= MAX_ORDER)
return -EINVAL;
err = strict_strtoul(buf, 10, &input);
if (err)
return err;
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = input;
spin_unlock(&hugetlb_lock);
return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);
static ssize_t free_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long free_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
free_huge_pages = h->free_huge_pages;
else
free_huge_pages = h->free_huge_pages_node[nid];
return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);
static ssize_t resv_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h = kobj_to_hstate(kobj, NULL);
return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);
static ssize_t surplus_hugepages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
struct hstate *h;
unsigned long surplus_huge_pages;
int nid;
h = kobj_to_hstate(kobj, &nid);
if (nid == NUMA_NO_NODE)
surplus_huge_pages = h->surplus_huge_pages;
else
surplus_huge_pages = h->surplus_huge_pages_node[nid];
return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);
static struct attribute *hstate_attrs[] = {
&nr_hugepages_attr.attr,
&nr_overcommit_hugepages_attr.attr,
&free_hugepages_attr.attr,
&resv_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
&nr_hugepages_mempolicy_attr.attr,
#endif
NULL,
};
static struct attribute_group hstate_attr_group = {
.attrs = hstate_attrs,
};
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
struct kobject **hstate_kobjs,
struct attribute_group *hstate_attr_group)
{
int retval;
int hi = h - hstates;
hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
if (!hstate_kobjs[hi])
return -ENOMEM;
retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
if (retval)
kobject_put(hstate_kobjs[hi]);
return retval;
}
static void __init hugetlb_sysfs_init(void)
{
struct hstate *h;
int err;
hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
if (!hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
hstate_kobjs, &hstate_attr_group);
if (err)
printk(KERN_ERR "Hugetlb: Unable to add hstate %s",
h->name);
}
}
#ifdef CONFIG_NUMA
/*
* node_hstate/s - associate per node hstate attributes, via their kobjects,
* with node devices in node_devices[] using a parallel array. The array
* index of a node device or _hstate == node id.
* This is here to avoid any static dependency of the node device driver, in
* the base kernel, on the hugetlb module.
*/
struct node_hstate {
struct kobject *hugepages_kobj;
struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
};
struct node_hstate node_hstates[MAX_NUMNODES];
/*
* A subset of global hstate attributes for node devices
*/
static struct attribute *per_node_hstate_attrs[] = {
&nr_hugepages_attr.attr,
&free_hugepages_attr.attr,
&surplus_hugepages_attr.attr,
NULL,
};
static struct attribute_group per_node_hstate_attr_group = {
.attrs = per_node_hstate_attrs,
};
/*
* kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
* Returns node id via non-NULL nidp.
*/
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
int nid;
for (nid = 0; nid < nr_node_ids; nid++) {
struct node_hstate *nhs = &node_hstates[nid];
int i;
for (i = 0; i < HUGE_MAX_HSTATE; i++)
if (nhs->hstate_kobjs[i] == kobj) {
if (nidp)
*nidp = nid;
return &hstates[i];
}
}
BUG();
return NULL;
}
/*
* Unregister hstate attributes from a single node device.
* No-op if no hstate attributes attached.
*/
void hugetlb_unregister_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
if (!nhs->hugepages_kobj)
return; /* no hstate attributes */
for_each_hstate(h)
if (nhs->hstate_kobjs[h - hstates]) {
kobject_put(nhs->hstate_kobjs[h - hstates]);
nhs->hstate_kobjs[h - hstates] = NULL;
}
kobject_put(nhs->hugepages_kobj);
nhs->hugepages_kobj = NULL;
}
/*
* hugetlb module exit: unregister hstate attributes from node devices
* that have them.
*/
static void hugetlb_unregister_all_nodes(void)
{
int nid;
/*
* disable node device registrations.
*/
register_hugetlbfs_with_node(NULL, NULL);
/*
* remove hstate attributes from any nodes that have them.
*/
for (nid = 0; nid < nr_node_ids; nid++)
hugetlb_unregister_node(&node_devices[nid]);
}
/*
* Register hstate attributes for a single node device.
* No-op if attributes already registered.
*/
void hugetlb_register_node(struct node *node)
{
struct hstate *h;
struct node_hstate *nhs = &node_hstates[node->dev.id];
int err;
if (nhs->hugepages_kobj)
return; /* already allocated */
nhs->hugepages_kobj = kobject_create_and_add("hugepages",
&node->dev.kobj);
if (!nhs->hugepages_kobj)
return;
for_each_hstate(h) {
err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
nhs->hstate_kobjs,
&per_node_hstate_attr_group);
if (err) {
printk(KERN_ERR "Hugetlb: Unable to add hstate %s"
" for node %d\n",
h->name, node->dev.id);
hugetlb_unregister_node(node);
break;
}
}
}
/*
* hugetlb init time: register hstate attributes for all registered node
* devices of nodes that have memory. All on-line nodes should have
* registered their associated device by this time.
*/
static void hugetlb_register_all_nodes(void)
{
int nid;
for_each_node_state(nid, N_HIGH_MEMORY) {
struct node *node = &node_devices[nid];
if (node->dev.id == nid)
hugetlb_register_node(node);
}
/*
* Let the node device driver know we're here so it can
* [un]register hstate attributes on node hotplug.
*/
register_hugetlbfs_with_node(hugetlb_register_node,
hugetlb_unregister_node);
}
#else /* !CONFIG_NUMA */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
BUG();
if (nidp)
*nidp = -1;
return NULL;
}
static void hugetlb_unregister_all_nodes(void) { }
static void hugetlb_register_all_nodes(void) { }
#endif
static void __exit hugetlb_exit(void)
{
struct hstate *h;
hugetlb_unregister_all_nodes();
for_each_hstate(h) {
kobject_put(hstate_kobjs[h - hstates]);
}
kobject_put(hugepages_kobj);
}
module_exit(hugetlb_exit);
static int __init hugetlb_init(void)
{
/* Some platforms decide whether they support huge pages at boot
* time. On these, such as powerpc, HPAGE_SHIFT is set to 0 when
* there is no such support.
*/
if (HPAGE_SHIFT == 0)
return 0;
if (!size_to_hstate(default_hstate_size)) {
default_hstate_size = HPAGE_SIZE;
if (!size_to_hstate(default_hstate_size))
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
default_hstate_idx = size_to_hstate(default_hstate_size) - hstates;
if (default_hstate_max_huge_pages)
default_hstate.max_huge_pages = default_hstate_max_huge_pages;
hugetlb_init_hstates();
gather_bootmem_prealloc();
report_hugepages();
hugetlb_sysfs_init();
hugetlb_register_all_nodes();
return 0;
}
module_init(hugetlb_init);
/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_add_hstate(unsigned order)
{
struct hstate *h;
unsigned long i;
if (size_to_hstate(PAGE_SIZE << order)) {
printk(KERN_WARNING "hugepagesz= specified twice, ignoring\n");
return;
}
BUG_ON(max_hstate >= HUGE_MAX_HSTATE);
BUG_ON(order == 0);
h = &hstates[max_hstate++];
h->order = order;
h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
h->nr_huge_pages = 0;
h->free_huge_pages = 0;
for (i = 0; i < MAX_NUMNODES; ++i)
INIT_LIST_HEAD(&h->hugepage_freelists[i]);
h->next_nid_to_alloc = first_node(node_states[N_HIGH_MEMORY]);
h->next_nid_to_free = first_node(node_states[N_HIGH_MEMORY]);
snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
huge_page_size(h)/1024);
parsed_hstate = h;
}
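/* Worked example (illustrative, not in the original source): on x86 with
* 4 KB base pages, "hugepagesz=2M" yields order == 9, so
* h->mask == ~((1ULL << 21) - 1) == ~0x1FFFFFULL and the hstate is named
* "hugepages-2048kB" (huge_page_size(h) / 1024 == 2048). */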
static int __init hugetlb_nrpages_setup(char *s)
{
unsigned long *mhp;
static unsigned long *last_mhp;
/*
* !max_hstate means we haven't parsed a hugepagesz= parameter yet,
* so this hugepages= parameter goes to the "default hstate".
*/
if (!max_hstate)
mhp = &default_hstate_max_huge_pages;
else
mhp = &parsed_hstate->max_huge_pages;
if (mhp == last_mhp) {
printk(KERN_WARNING "hugepages= specified twice without "
"interleaving hugepagesz=, ignoring\n");
return 1;
}
if (sscanf(s, "%lu", mhp) <= 0)
*mhp = 0;
/*
* Global state is always initialized later in hugetlb_init.
* But we need to allocate >= MAX_ORDER hstates here early to still
* use the bootmem allocator.
*/
if (max_hstate && parsed_hstate->order >= MAX_ORDER)
hugetlb_hstate_alloc_pages(parsed_hstate);
last_mhp = mhp;
return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);
static int __init hugetlb_default_setup(char *s)
{
default_hstate_size = memparse(s, &s);
return 1;
}
__setup("default_hugepagesz=", hugetlb_default_setup);
static unsigned int cpuset_mems_nr(unsigned int *array)
{
int node;
unsigned int nr = 0;
for_each_node_mask(node, cpuset_current_mems_allowed)
nr += array[node];
return nr;
}
#ifdef CONFIG_SYSCTL
static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
tmp = h->max_huge_pages;
if (write && h->order >= MAX_ORDER)
return -EINVAL;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
NODEMASK_ALLOC(nodemask_t, nodes_allowed,
GFP_KERNEL | __GFP_NORETRY);
if (!(obey_mempolicy &&
init_nodemask_of_mempolicy(nodes_allowed))) {
NODEMASK_FREE(nodes_allowed);
nodes_allowed = &node_states[N_HIGH_MEMORY];
}
h->max_huge_pages = set_max_huge_pages(h, tmp, nodes_allowed);
if (nodes_allowed != &node_states[N_HIGH_MEMORY])
NODEMASK_FREE(nodes_allowed);
}
out:
return ret;
}
int hugetlb_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(false, table, write,
buffer, length, ppos);
}
#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
return hugetlb_sysctl_handler_common(true, table, write,
buffer, length, ppos);
}
#endif /* CONFIG_NUMA */
int hugetlb_treat_movable_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
{
proc_dointvec(table, write, buffer, length, ppos);
if (hugepages_treat_as_movable)
htlb_alloc_mask = GFP_HIGHUSER_MOVABLE;
else
htlb_alloc_mask = GFP_HIGHUSER;
return 0;
}
int hugetlb_overcommit_handler(struct ctl_table *table, int write,
void __user *buffer,
size_t *length, loff_t *ppos)
{
struct hstate *h = &default_hstate;
unsigned long tmp;
int ret;
tmp = h->nr_overcommit_huge_pages;
if (write && h->order >= MAX_ORDER)
return -EINVAL;
table->data = &tmp;
table->maxlen = sizeof(unsigned long);
ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
if (ret)
goto out;
if (write) {
spin_lock(&hugetlb_lock);
h->nr_overcommit_huge_pages = tmp;
spin_unlock(&hugetlb_lock);
}
out:
return ret;
}
#endif /* CONFIG_SYSCTL */
void hugetlb_report_meminfo(struct seq_file *m)
{
struct hstate *h = &default_hstate;
seq_printf(m,
"HugePages_Total: %5lu\n"
"HugePages_Free: %5lu\n"
"HugePages_Rsvd: %5lu\n"
"HugePages_Surp: %5lu\n"
"Hugepagesize: %8lu kB\n",
h->nr_huge_pages,
h->free_huge_pages,
h->resv_huge_pages,
h->surplus_huge_pages,
1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
}
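/*
 * Worked example (editorial illustration): with PAGE_SHIFT == 12 and a
 * 2 MB huge page (order == 9), the Hugepagesize line above prints
 * 1UL << (9 + 12 - 10) == 2048 kB.
 */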
int hugetlb_report_node_meminfo(int nid, char *buf)
{
struct hstate *h = &default_hstate;
return sprintf(buf,
"Node %d HugePages_Total: %5u\n"
"Node %d HugePages_Free: %5u\n"
"Node %d HugePages_Surp: %5u\n",
nid, h->nr_huge_pages_node[nid],
nid, h->free_huge_pages_node[nid],
nid, h->surplus_huge_pages_node[nid]);
}
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
struct hstate *h = &default_hstate;
return h->nr_huge_pages * pages_per_huge_page(h);
}
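/*
 * Worked example (editorial illustration): with 2 MB huge pages and a
 * 4 KB PAGE_SIZE, pages_per_huge_page(h) == 512, so 16 allocated huge
 * pages report as 16 * 512 == 8192 base pages (32 MB).
 */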
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
int ret = -ENOMEM;
spin_lock(&hugetlb_lock);
/*
* When cpuset is configured, it breaks the strict hugetlb page
* reservation as the accounting is done on a global variable. Such
* reservation is completely rubbish in the presence of cpuset because
* the reservation is not checked against page availability for the
* current cpuset. Applications can still potentially be OOM'ed by the
* kernel for lack of free htlb pages in the cpuset that the task is in.
* Attempting to enforce strict accounting with cpuset is almost
* impossible (or too ugly) because cpusets are so fluid that tasks and
* memory nodes can be dynamically moved between them.
*
* The change of semantics for shared hugetlb mapping with cpuset is
* undesirable. However, in order to preserve some of the semantics,
* we fall back to check against current free page availability as
* a best attempt and hopefully to minimize the impact of changing
* semantics that cpuset has.
*/
if (delta > 0) {
if (gather_surplus_pages(h, delta) < 0)
goto out;
if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
return_unused_surplus_pages(h, delta);
goto out;
}
}
ret = 0;
if (delta < 0)
return_unused_surplus_pages(h, (unsigned long) -delta);
out:
spin_unlock(&hugetlb_lock);
return ret;
}
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
struct resv_map *reservations = vma_resv_map(vma);
/*
* This new VMA should share its sibling's reservation map if present.
* The VMA will only ever have a valid reservation map pointer where
* it is being copied for another still existing VMA. As that VMA
* has a reference to the reservation map it cannot disappear until
* after this open call completes. It is therefore safe to take a
* new reference here without additional locking.
*/
if (reservations)
kref_get(&reservations->refs);
}
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
struct hstate *h = hstate_vma(vma);
struct resv_map *reservations = vma_resv_map(vma);
struct hugepage_subpool *spool = subpool_vma(vma);
unsigned long reserve;
unsigned long start;
unsigned long end;
if (reservations) {
start = vma_hugecache_offset(h, vma, vma->vm_start);
end = vma_hugecache_offset(h, vma, vma->vm_end);
reserve = (end - start) -
region_count(&reservations->regions, start, end);
kref_put(&reservations->refs, resv_map_release);
if (reserve) {
hugetlb_acct_memory(h, -reserve);
hugepage_subpool_put_pages(spool, reserve);
}
}
}
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
* hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
* this far.
*/
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
BUG();
return 0;
}
const struct vm_operations_struct hugetlb_vm_ops = {
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
int writable)
{
pte_t entry;
if (writable) {
entry =
pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
} else {
entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
}
entry = pte_mkyoung(entry);
entry = pte_mkhuge(entry);
entry = arch_make_huge_pte(entry, vma, page, writable);
return entry;
}
static void set_huge_ptep_writable(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t entry;
entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
update_mmu_cache(vma, address, ptep);
}
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma)
{
pte_t *src_pte, *dst_pte, entry;
struct page *ptepage;
unsigned long addr;
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
src_pte = huge_pte_offset(src, addr);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
if (!dst_pte)
goto nomem;
/* If the pagetables are shared don't copy or take references */
if (dst_pte == src_pte)
continue;
spin_lock(&dst->page_table_lock);
spin_lock_nested(&src->page_table_lock, SINGLE_DEPTH_NESTING);
if (!huge_pte_none(huge_ptep_get(src_pte))) {
if (cow)
huge_ptep_set_wrprotect(src, addr, src_pte);
entry = huge_ptep_get(src_pte);
ptepage = pte_page(entry);
get_page(ptepage);
page_dup_rmap(ptepage);
set_huge_pte_at(dst, addr, dst_pte, entry);
}
spin_unlock(&src->page_table_lock);
spin_unlock(&dst->page_table_lock);
}
return 0;
nomem:
return -ENOMEM;
}
static int is_hugetlb_entry_migration(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return 0;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_migration_entry(swp))
return 1;
else
return 0;
}
static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
swp_entry_t swp;
if (huge_pte_none(pte) || pte_present(pte))
return 0;
swp = pte_to_swp_entry(pte);
if (non_swap_entry(swp) && is_hwpoison_entry(swp))
return 1;
else
return 0;
}
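/*
 * Editorial sketch (an assumption, not the kernel's actual code): the
 * two helpers above differ only in the final predicate applied to the
 * swap entry, so the shared non-present check could be factored out.
 * huge_swp_entry() is a hypothetical name introduced here; the callers
 * would reduce to huge_swp_entry(pte, &swp) && is_migration_entry(swp)
 * and huge_swp_entry(pte, &swp) && is_hwpoison_entry(swp).
 */
static inline int huge_swp_entry(pte_t pte, swp_entry_t *swp)
{
/* a present or empty huge PTE carries no swap entry at all */
if (huge_pte_none(pte) || pte_present(pte))
return 0;
*swp = pte_to_swp_entry(pte);
return non_swap_entry(*swp);
}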
void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
pte_t *ptep;
pte_t pte;
struct page *page;
struct page *tmp;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
/*
* A page gathering list, protected by the per-file i_mmap_mutex. The
* lock is used to avoid list corruption from multiple unmapping
* of the same page since we are using page->lru.
*/
LIST_HEAD(page_list);
WARN_ON(!is_vm_hugetlb_page(vma));
BUG_ON(start & ~huge_page_mask(h));
BUG_ON(end & ~huge_page_mask(h));
mmu_notifier_invalidate_range_start(mm, start, end);
spin_lock(&mm->page_table_lock);
for (address = start; address < end; address += sz) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
if (huge_pmd_unshare(mm, &address, ptep))
continue;
pte = huge_ptep_get(ptep);
if (huge_pte_none(pte))
continue;
/*
* A HWPoisoned hugepage is already unmapped and its reference dropped
*/
if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
continue;
page = pte_page(pte);
/*
* If a reference page is supplied, it is because a specific
* page is being unmapped, not a range. Ensure the page we
* are about to unmap is the actual page of interest.
*/
if (ref_page) {
if (page != ref_page)
continue;
/*
* Mark the VMA as having unmapped its page so that
* future faults in this VMA will fail rather than
* looking like data was lost
*/
set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
}
pte = huge_ptep_get_and_clear(mm, address, ptep);
if (pte_dirty(pte))
set_page_dirty(page);
list_add(&page->lru, &page_list);
/* Bail out after unmapping reference page if supplied */
if (ref_page)
break;
}
flush_tlb_range(vma, start, end);
spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, start, end);
list_for_each_entry_safe(page, tmp, &page_list, lru) {
page_remove_rmap(page);
list_del(&page->lru);
put_page(page);
}
}
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page)
{
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
__unmap_hugepage_range(vma, start, end, ref_page);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
/*
* This is called when the original mapper is failing to COW a MAP_PRIVATE
* mapping it owns the reserve page for. The intention is to unmap the page
* from other VMAs and let the children be SIGKILLed if they are faulting the
* same region.
*/
static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page, unsigned long address)
{
struct hstate *h = hstate_vma(vma);
struct vm_area_struct *iter_vma;
struct address_space *mapping;
struct prio_tree_iter iter;
pgoff_t pgoff;
/*
* vm_pgoff is in PAGE_SIZE units, hence the different calculation
* from page cache lookup which is in HPAGE_SIZE units.
*/
address = address & huge_page_mask(h);
pgoff = vma_hugecache_offset(h, vma, address);
mapping = vma->vm_file->f_dentry->d_inode->i_mapping;
/*
* Take the mapping lock for the duration of the table walk. As
* this mapping should be shared between all the VMAs,
* __unmap_hugepage_range() is called directly since the lock is already held.
*/
mutex_lock(&mapping->i_mmap_mutex);
vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
/* Do not unmap the current VMA */
if (iter_vma == vma)
continue;
/*
* Unmap the page from other VMAs without their own reserves.
* They get marked to be SIGKILLed if they fault in these
* areas. This is because a future no-page fault on this VMA
* could insert a zeroed page instead of the data existing
* from the time of fork. This would look like data corruption
*/
if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
__unmap_hugepage_range(iter_vma,
address, address + huge_page_size(h),
page);
}
mutex_unlock(&mapping->i_mmap_mutex);
return 1;
}
/*
* Hugetlb_cow() should be called with page lock of the original hugepage held.
* Called with hugetlb_instantiation_mutex held and pte_page locked so we
* cannot race with other handlers or page migration.
* Keep the pte_same checks anyway to make transition from the mutex easier.
*/
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte,
struct page *pagecache_page)
{
struct hstate *h = hstate_vma(vma);
struct page *old_page, *new_page;
int avoidcopy;
int outside_reserve = 0;
old_page = pte_page(pte);
retry_avoidcopy:
/* If no-one else is actually using this page, avoid the copy
* and just make the page writable */
avoidcopy = (page_mapcount(old_page) == 1);
if (avoidcopy) {
if (PageAnon(old_page))
page_move_anon_rmap(old_page, vma, address);
set_huge_ptep_writable(vma, address, ptep);
return 0;
}
/*
* If the process that created a MAP_PRIVATE mapping is about to
* perform a COW due to a shared page count, attempt to satisfy
* the allocation without using the existing reserves. The pagecache
* page is used to determine if the reserve at this address was
* consumed or not. If reserves were used, a partial faulted mapping
* at the time of fork() could consume its reserves on COW instead
* of the full address range.
*/
if (!(vma->vm_flags & VM_MAYSHARE) &&
is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
old_page != pagecache_page)
outside_reserve = 1;
page_cache_get(old_page);
/* Drop page_table_lock as buddy allocator may be called */
spin_unlock(&mm->page_table_lock);
new_page = alloc_huge_page(vma, address, outside_reserve);
if (IS_ERR(new_page)) {
page_cache_release(old_page);
/*
* If a process owning a MAP_PRIVATE mapping fails to COW,
* it is due to references held by a child and an insufficient
* huge page pool. To guarantee the original mapper's
* reliability, unmap the page from child processes. The child
* may get SIGKILLed if it later faults.
*/
if (outside_reserve) {
BUG_ON(huge_pte_none(pte));
if (unmap_ref_private(mm, vma, old_page, address)) {
BUG_ON(huge_pte_none(pte));
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte)))
goto retry_avoidcopy;
/*
* race occurs while re-acquiring page_table_lock, and
* our job is done.
*/
return 0;
}
WARN_ON_ONCE(1);
}
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
return -PTR_ERR(new_page);
}
/*
* When the original hugepage is a shared one, it does not have
* anon_vma prepared.
*/
if (unlikely(anon_vma_prepare(vma))) {
page_cache_release(new_page);
page_cache_release(old_page);
/* Caller expects lock to be held */
spin_lock(&mm->page_table_lock);
return VM_FAULT_OOM;
}
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
/*
* Retake the page_table_lock to check for racing updates
* before the page tables are altered
*/
spin_lock(&mm->page_table_lock);
ptep = huge_pte_offset(mm, address & huge_page_mask(h));
if (likely(pte_same(huge_ptep_get(ptep), pte))) {
/* Break COW */
mmu_notifier_invalidate_range_start(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
huge_ptep_clear_flush(vma, address, ptep);
set_huge_pte_at(mm, address, ptep,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page);
hugepage_add_new_anon_rmap(new_page, vma, address);
/* Make the old page be freed below */
new_page = old_page;
mmu_notifier_invalidate_range_end(mm,
address & huge_page_mask(h),
(address & huge_page_mask(h)) + huge_page_size(h));
}
page_cache_release(new_page);
page_cache_release(old_page);
return 0;
}
/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
return find_lock_page(mapping, idx);
}
/*
* Return whether there is a pagecache page to back the given address within the VMA.
* Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
*/
static bool hugetlbfs_pagecache_present(struct hstate *h,
struct vm_area_struct *vma, unsigned long address)
{
struct address_space *mapping;
pgoff_t idx;
struct page *page;
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
page = find_get_page(mapping, idx);
if (page)
put_page(page);
return page != NULL;
}
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, unsigned int flags)
{
struct hstate *h = hstate_vma(vma);
int ret = VM_FAULT_SIGBUS;
int anon_rmap = 0;
pgoff_t idx;
unsigned long size;
struct page *page;
struct address_space *mapping;
pte_t new_pte;
/*
* Currently, we are forced to kill the process in the event the
* original mapper has unmapped pages from the child due to a failed
* COW. Warn that such a situation has occurred as it may not be obvious.
*/
if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
printk(KERN_WARNING
"PID %d killed due to inadequate hugepage pool\n",
current->pid);
return ret;
}
mapping = vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, vma, address);
/*
* Use page lock to guard against racing truncation
* before we get page_table_lock.
*/
retry:
page = find_lock_page(mapping, idx);
if (!page) {
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto out;
page = alloc_huge_page(vma, address, 0);
if (IS_ERR(page)) {
ret = -PTR_ERR(page);
goto out;
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
if (vma->vm_flags & VM_MAYSHARE) {
int err;
struct inode *inode = mapping->host;
err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
if (err) {
put_page(page);
if (err == -EEXIST)
goto retry;
goto out;
}
spin_lock(&inode->i_lock);
inode->i_blocks += blocks_per_huge_page(h);
spin_unlock(&inode->i_lock);
} else {
lock_page(page);
if (unlikely(anon_vma_prepare(vma))) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
anon_rmap = 1;
}
} else {
/*
* If a memory error occurs between mmap() and fault, some process
* may not have a hwpoisoned swap entry for the errored virtual address.
* So we need to block hugepage faults with a PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
ret = VM_FAULT_HWPOISON |
VM_FAULT_SET_HINDEX(h - hstates);
goto backout_unlocked;
}
}
/*
* If we are going to COW a private mapping later, we examine the
* pending reservations for this page now. This will ensure that
* any allocations necessary to record that reservation occur outside
* the spinlock.
*/
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto backout_unlocked;
}
spin_lock(&mm->page_table_lock);
size = i_size_read(mapping->host) >> huge_page_shift(h);
if (idx >= size)
goto backout;
ret = 0;
if (!huge_pte_none(huge_ptep_get(ptep)))
goto backout;
if (anon_rmap)
hugepage_add_new_anon_rmap(page, vma, address);
else
page_dup_rmap(page);
new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
&& (vma->vm_flags & VM_SHARED)));
set_huge_pte_at(mm, address, ptep, new_pte);
if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
/* Optimization, do the COW without a second fault */
ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
}
spin_unlock(&mm->page_table_lock);
unlock_page(page);
out:
return ret;
backout:
spin_unlock(&mm->page_table_lock);
backout_unlocked:
unlock_page(page);
put_page(page);
goto out;
}
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
pte_t *ptep;
pte_t entry;
int ret;
struct page *page = NULL;
struct page *pagecache_page = NULL;
static DEFINE_MUTEX(hugetlb_instantiation_mutex);
struct hstate *h = hstate_vma(vma);
address &= huge_page_mask(h);
ptep = huge_pte_offset(mm, address);
if (ptep) {
entry = huge_ptep_get(ptep);
if (unlikely(is_hugetlb_entry_migration(entry))) {
migration_entry_wait(mm, (pmd_t *)ptep, address);
return 0;
} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
return VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(h - hstates);
}
ptep = huge_pte_alloc(mm, address, huge_page_size(h));
if (!ptep)
return VM_FAULT_OOM;
/*
* Serialize hugepage allocation and instantiation, so that we don't
* get spurious allocation failures if two CPUs race to instantiate
* the same page in the page cache.
*/
mutex_lock(&hugetlb_instantiation_mutex);
entry = huge_ptep_get(ptep);
if (huge_pte_none(entry)) {
ret = hugetlb_no_page(mm, vma, address, ptep, flags);
goto out_mutex;
}
ret = 0;
/*
* If we are going to COW the mapping later, we examine the pending
* reservations for this page now. This will ensure that any
* allocations necessary to record that reservation occur outside the
* spinlock. For private mappings, we also lookup the pagecache
* page now as it is used to determine if a reservation has been
* consumed.
*/
if ((flags & FAULT_FLAG_WRITE) && !pte_write(entry)) {
if (vma_needs_reservation(h, vma, address) < 0) {
ret = VM_FAULT_OOM;
goto out_mutex;
}
if (!(vma->vm_flags & VM_MAYSHARE))
pagecache_page = hugetlbfs_pagecache_page(h,
vma, address);
}
/*
* hugetlb_cow() requires page locks of pte_page(entry) and
* pagecache_page, so here we need to take the former one
* when page != pagecache_page or !pagecache_page.
* Note that locking order is always pagecache_page -> page,
* so no worry about deadlock.
*/
page = pte_page(entry);
get_page(page);
if (page != pagecache_page)
lock_page(page);
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
goto out_page_table_lock;
if (flags & FAULT_FLAG_WRITE) {
if (!pte_write(entry)) {
ret = hugetlb_cow(mm, vma, address, ptep, entry,
pagecache_page);
goto out_page_table_lock;
}
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
update_mmu_cache(vma, address, ptep);
out_page_table_lock:
spin_unlock(&mm->page_table_lock);
if (pagecache_page) {
unlock_page(pagecache_page);
put_page(pagecache_page);
}
if (page != pagecache_page)
unlock_page(page);
put_page(page);
out_mutex:
mutex_unlock(&hugetlb_instantiation_mutex);
return ret;
}
/* Can be overridden by architectures */
__attribute__((weak)) struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
pud_t *pud, int write)
{
BUG();
return NULL;
}
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, int *length, int i,
unsigned int flags)
{
unsigned long pfn_offset;
unsigned long vaddr = *position;
int remainder = *length;
struct hstate *h = hstate_vma(vma);
spin_lock(&mm->page_table_lock);
while (vaddr < vma->vm_end && remainder) {
pte_t *pte;
int absent;
struct page *page;
/*
* Some archs (sparc64, sh*) have multiple pte_ts per
* hugepage. We have to make sure we get the
* first, for the page indexing below to work.
*/
pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
absent = !pte || huge_pte_none(huge_ptep_get(pte));
/*
* When coredumping, it suits get_dump_page if we just return
* an error where there's an empty slot with no huge pagecache
* to back it. This way, we avoid allocating a hugepage, and
* the sparse dumpfile avoids allocating disk blocks, but its
* huge holes still show up with zeroes where they need to be.
*/
if (absent && (flags & FOLL_DUMP) &&
!hugetlbfs_pagecache_present(h, vma, vaddr)) {
remainder = 0;
break;
}
if (absent ||
((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) {
int ret;
spin_unlock(&mm->page_table_lock);
ret = hugetlb_fault(mm, vma, vaddr,
(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
spin_lock(&mm->page_table_lock);
if (!(ret & VM_FAULT_ERROR))
continue;
remainder = 0;
break;
}
pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
page = pte_page(huge_ptep_get(pte));
same_page:
if (pages) {
pages[i] = mem_map_offset(page, pfn_offset);
get_page(pages[i]);
}
if (vmas)
vmas[i] = vma;
vaddr += PAGE_SIZE;
++pfn_offset;
--remainder;
++i;
if (vaddr < vma->vm_end && remainder &&
pfn_offset < pages_per_huge_page(h)) {
/*
* We use pfn_offset to avoid touching the pageframes
* of this compound page.
*/
goto same_page;
}
}
spin_unlock(&mm->page_table_lock);
*length = remainder;
*position = vaddr;
return i ? i : -EFAULT;
}
void hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long address, unsigned long end, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long start = address;
pte_t *ptep;
pte_t pte;
struct hstate *h = hstate_vma(vma);
BUG_ON(address >= end);
flush_cache_range(vma, address, end);
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
spin_lock(&mm->page_table_lock);
for (; address < end; address += huge_page_size(h)) {
ptep = huge_pte_offset(mm, address);
if (!ptep)
continue;
if (huge_pmd_unshare(mm, &address, ptep))
continue;
if (!huge_pte_none(huge_ptep_get(ptep))) {
pte = huge_ptep_get_and_clear(mm, address, ptep);
pte = pte_mkhuge(pte_modify(pte, newprot));
set_huge_pte_at(mm, address, ptep, pte);
}
}
spin_unlock(&mm->page_table_lock);
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
flush_tlb_range(vma, start, end);
}
int hugetlb_reserve_pages(struct inode *inode,
long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags)
{
long ret, chg;
struct hstate *h = hstate_inode(inode);
struct hugepage_subpool *spool = subpool_inode(inode);
/*
* Only apply hugepage reservation if asked. At fault time, an
* attempt will be made for VM_NORESERVE to allocate a page
* without using reserves
*/
if (vm_flags & VM_NORESERVE)
return 0;
/*
* Shared mappings base their reservation on the number of pages that
* are already allocated on behalf of the file. Private mappings need
* to reserve the full area even if read-only as mprotect() may be
* called to make the mapping read-write. Assume !vma is a shm mapping
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
chg = region_chg(&inode->i_mapping->private_list, from, to);
else {
struct resv_map *resv_map = resv_map_alloc();
if (!resv_map)
return -ENOMEM;
chg = to - from;
set_vma_resv_map(vma, resv_map);
set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
}
if (chg < 0)
return chg;
/* There must be enough pages in the subpool for the mapping */
if (hugepage_subpool_get_pages(spool, chg))
return -ENOSPC;
/*
* Check that enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
ret = hugetlb_acct_memory(h, chg);
if (ret < 0) {
hugepage_subpool_put_pages(spool, chg);
return ret;
}
/*
* Account for the reservations made. Shared mappings record regions
* that have reservations as they are shared by multiple VMAs.
* When the last VMA disappears, the region map says how much
* the reservation was and the page cache tells how much of
* the reservation was consumed. Private mappings are per-VMA and
* only the consumed reservations are tracked. When the VMA
* disappears, the original reservation is the VMA size and the
* consumed reservations are stored in the map. Hence, nothing
* else has to be done for private mappings here
*/
if (!vma || vma->vm_flags & VM_MAYSHARE)
region_add(&inode->i_mapping->private_list, from, to);
return 0;
}
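/*
 * Worked example (editorial illustration, made-up numbers): a private
 * mapping of 8 huge pages (from == 0, to == 8) charges chg == 8. A
 * shared mapping of the same range where 3 pages are already covered
 * by the region map is charged only the 5 uncovered pages by
 * region_chg().
 */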
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
struct hstate *h = hstate_inode(inode);
long chg = region_truncate(&inode->i_mapping->private_list, offset);
struct hugepage_subpool *spool = subpool_inode(inode);
spin_lock(&inode->i_lock);
inode->i_blocks -= (blocks_per_huge_page(h) * freed);
spin_unlock(&inode->i_lock);
hugepage_subpool_put_pages(spool, (chg - freed));
hugetlb_acct_memory(h, -(chg - freed));
}
#ifdef CONFIG_MEMORY_FAILURE
/* Should be called in hugetlb_lock */
static int is_hugepage_on_freelist(struct page *hpage)
{
struct page *page;
struct page *tmp;
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);
list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
if (page == hpage)
return 1;
return 0;
}
/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
*/
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);
int ret = -EBUSY;
spin_lock(&hugetlb_lock);
if (is_hugepage_on_freelist(hpage)) {
list_del(&hpage->lru);
set_page_refcounted(hpage);
h->free_huge_pages--;
h->free_huge_pages_node[nid]--;
ret = 0;
}
spin_unlock(&hugetlb_lock);
return ret;
}
#endif
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_3656_0 |
crossvul-cpp_data_good_3449_1 | /*
* fs/inotify_user.c - inotify support for userspace
*
* Authors:
* John McCutchan <ttb@tentacle.dhs.org>
* Robert Love <rml@novell.com>
*
* Copyright (C) 2005 John McCutchan
* Copyright 2006 Hewlett-Packard Development Company, L.P.
*
* Copyright (C) 2009 Eric Paris <Red Hat Inc>
* inotify was largely rewritten to make use of the fsnotify infrastructure
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include "inotify.h"
#include <asm/ioctls.h>
/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;
static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
static int zero;
ctl_table inotify_table[] = {
{
.procname = "max_user_instances",
.data = &inotify_max_user_instances,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_user_watches",
.data = &inotify_max_user_watches,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
{
.procname = "max_queued_events",
.data = &inotify_max_queued_events,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero
},
{ }
};
#endif /* CONFIG_SYSCTL */
static inline __u32 inotify_arg_to_mask(u32 arg)
{
__u32 mask;
/*
* every mark should accept its own ignored, care about children,
* and receive events when the inode is unmounted
*/
mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);
/* mask off the flags used to open the fd */
mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));
return mask;
}
static inline u32 inotify_mask_to_arg(__u32 mask)
{
return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
IN_Q_OVERFLOW);
}
/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
struct fsnotify_group *group = file->private_data;
int ret = 0;
poll_wait(file, &group->notification_waitq, wait);
mutex_lock(&group->notification_mutex);
if (!fsnotify_notify_queue_is_empty(group))
ret = POLLIN | POLLRDNORM;
mutex_unlock(&group->notification_mutex);
return ret;
}
/*
* Get an inotify_kernel_event if one exists and is small
* enough to fit in "count". Return an error pointer if
* not large enough.
*
* Called with the group->notification_mutex held.
*/
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
size_t count)
{
size_t event_size = sizeof(struct inotify_event);
struct fsnotify_event *event;
if (fsnotify_notify_queue_is_empty(group))
return NULL;
event = fsnotify_peek_notify_event(group);
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
if (event->name_len)
event_size += roundup(event->name_len + 1, event_size);
if (event_size > count)
return ERR_PTR(-EINVAL);
/* held the notification_mutex the whole time, so this is the
* same event we peeked above */
fsnotify_remove_notify_event(group);
return event;
}
/*
* Copy an event to user space, returning how much we copied.
*
* We already checked that the event size is smaller than the
* buffer we had in "get_one_event()" above.
*/
static ssize_t copy_event_to_user(struct fsnotify_group *group,
struct fsnotify_event *event,
char __user *buf)
{
struct inotify_event inotify_event;
struct fsnotify_event_private_data *fsn_priv;
struct inotify_event_private_data *priv;
size_t event_size = sizeof(struct inotify_event);
size_t name_len = 0;
pr_debug("%s: group=%p event=%p\n", __func__, group, event);
/* we get the inotify watch descriptor from the event private data */
spin_lock(&event->lock);
fsn_priv = fsnotify_remove_priv_from_event(group, event);
spin_unlock(&event->lock);
if (!fsn_priv)
inotify_event.wd = -1;
else {
priv = container_of(fsn_priv, struct inotify_event_private_data,
fsnotify_event_priv_data);
inotify_event.wd = priv->wd;
inotify_free_event_priv(fsn_priv);
}
/*
* round up event->name_len plus an extra byte for the terminating
* '\0' so the result is a multiple of event_size.
*/
if (event->name_len)
name_len = roundup(event->name_len + 1, event_size);
inotify_event.len = name_len;
inotify_event.mask = inotify_mask_to_arg(event->mask);
inotify_event.cookie = event->sync_cookie;
/* send the main event */
if (copy_to_user(buf, &inotify_event, event_size))
return -EFAULT;
buf += event_size;
/*
* fsnotify only stores the pathname, so here we have to send the pathname
* and then pad that pathname out to a multiple of sizeof(inotify_event)
* with zeros. The zeros come from clear_user() below.
*/
if (name_len) {
unsigned int len_to_zero = name_len - event->name_len;
/* copy the path name */
if (copy_to_user(buf, event->file_name, event->name_len))
return -EFAULT;
buf += event->name_len;
/* fill userspace with 0's */
if (clear_user(buf, len_to_zero))
return -EFAULT;
buf += len_to_zero;
event_size += name_len;
}
return event_size;
}
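/*
 * Worked example (editorial illustration): sizeof(struct inotify_event)
 * is 16 on common ABIs. For a name "foo" (name_len == 3),
 * roundup(3 + 1, 16) == 16, so 3 name bytes are copied, 13 zero bytes
 * pad the record, and the event occupies 16 + 16 == 32 bytes total.
 */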
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
struct fsnotify_group *group;
struct fsnotify_event *kevent;
char __user *start;
int ret;
DEFINE_WAIT(wait);
start = buf;
group = file->private_data;
while (1) {
prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);
if (kevent) {
ret = PTR_ERR(kevent);
if (IS_ERR(kevent))
break;
ret = copy_event_to_user(group, kevent, buf);
fsnotify_put_event(kevent);
if (ret < 0)
break;
buf += ret;
count -= ret;
continue;
}
ret = -EAGAIN;
if (file->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
schedule();
}
finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static int inotify_fasync(int fd, struct file *file, int on)
{
struct fsnotify_group *group = file->private_data;
return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 0 : -EIO;
}
static int inotify_release(struct inode *ignored, struct file *file)
{
struct fsnotify_group *group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
fsnotify_clear_marks_by_group(group);
/* free this group, matching get was inotify_init->fsnotify_obtain_group */
fsnotify_put_group(group);
return 0;
}
static long inotify_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct fsnotify_group *group;
struct fsnotify_event_holder *holder;
struct fsnotify_event *event;
void __user *p;
int ret = -ENOTTY;
size_t send_len = 0;
group = file->private_data;
p = (void __user *) arg;
pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);
switch (cmd) {
case FIONREAD:
mutex_lock(&group->notification_mutex);
list_for_each_entry(holder, &group->notification_list, event_list) {
event = holder->event;
send_len += sizeof(struct inotify_event);
if (event->name_len)
send_len += roundup(event->name_len + 1,
sizeof(struct inotify_event));
}
mutex_unlock(&group->notification_mutex);
ret = put_user(send_len, (int __user *) p);
break;
}
return ret;
}
static const struct file_operations inotify_fops = {
.poll = inotify_poll,
.read = inotify_read,
.fasync = inotify_fasync,
.release = inotify_release,
.unlocked_ioctl = inotify_ioctl,
.compat_ioctl = inotify_ioctl,
.llseek = noop_llseek,
};
/*
* inotify_find_inode - resolve a user-given path to a specific inode
*/
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
int error;
error = user_path_at(AT_FDCWD, dirname, flags, path);
if (error)
return error;
/* you can only watch an inode if you have read permissions on it */
error = inode_permission(path->dentry->d_inode, MAY_READ);
if (error)
path_put(path);
return error;
}
static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
int *last_wd,
struct inotify_inode_mark *i_mark)
{
int ret;
do {
if (unlikely(!idr_pre_get(idr, GFP_KERNEL)))
return -ENOMEM;
spin_lock(idr_lock);
ret = idr_get_new_above(idr, i_mark, *last_wd + 1,
&i_mark->wd);
/* we added the mark to the idr, take a reference */
if (!ret) {
*last_wd = i_mark->wd;
fsnotify_get_mark(&i_mark->fsn_mark);
}
spin_unlock(idr_lock);
} while (ret == -EAGAIN);
return ret;
}
static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
int wd)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *i_mark;
assert_spin_locked(idr_lock);
i_mark = idr_find(idr, wd);
if (i_mark) {
struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;
fsnotify_get_mark(fsn_mark);
/* One ref for being in the idr, one ref we just took */
BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
}
return i_mark;
}
static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
int wd)
{
struct inotify_inode_mark *i_mark;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
spin_lock(idr_lock);
i_mark = inotify_idr_find_locked(group, wd);
spin_unlock(idr_lock);
return i_mark;
}
static void do_inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
int wd = i_mark->wd;
assert_spin_locked(idr_lock);
idr_remove(idr, wd);
/* removed from the idr, drop that ref */
fsnotify_put_mark(&i_mark->fsn_mark);
}
/*
* Remove the mark from the idr (if present) and drop the reference
* on the mark because it was in the idr.
*/
static void inotify_remove_from_idr(struct fsnotify_group *group,
struct inotify_inode_mark *i_mark)
{
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
struct inotify_inode_mark *found_i_mark = NULL;
int wd;
spin_lock(idr_lock);
wd = i_mark->wd;
/*
* does this i_mark think it is in the idr? we shouldn't get called
* if it wasn't....
*/
if (wd == -1) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/* Lets look in the idr to see if we find it */
found_i_mark = inotify_idr_find_locked(group, wd);
if (unlikely(!found_i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
goto out;
}
/*
* We found a mark in the idr at the right wd, but it's
* not the mark we were told to remove. eparis seriously
* fucked up somewhere.
*/
if (unlikely(found_i_mark != i_mark)) {
WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
"found_i_mark->group=%p found_i_mark->inode=%p\n",
__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
found_i_mark->fsn_mark.group,
found_i_mark->fsn_mark.i.inode);
goto out;
}
/*
* One ref for being in the idr
* one ref held by the caller trying to kill us
* one ref grabbed by inotify_idr_find
*/
if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
/* we can't really recover with bad ref counting... */
BUG();
}
do_inotify_remove_from_idr(group, i_mark);
out:
/* match the ref taken by inotify_idr_find_locked() */
if (found_i_mark)
fsnotify_put_mark(&found_i_mark->fsn_mark);
i_mark->wd = -1;
spin_unlock(idr_lock);
}
/*
* Send IN_IGNORED for this wd, remove this wd from the idr.
*/
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
struct fsnotify_group *group)
{
struct inotify_inode_mark *i_mark;
struct fsnotify_event *ignored_event, *notify_event;
struct inotify_event_private_data *event_priv;
struct fsnotify_event_private_data *fsn_event_priv;
int ret;
ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
FSNOTIFY_EVENT_NONE, NULL, 0,
GFP_NOFS);
if (!ignored_event)
return;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
if (unlikely(!event_priv))
goto skip_send_ignore;
fsn_event_priv = &event_priv->fsnotify_event_priv_data;
fsn_event_priv->group = group;
event_priv->wd = i_mark->wd;
notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
if (notify_event) {
if (IS_ERR(notify_event))
ret = PTR_ERR(notify_event);
else
fsnotify_put_event(notify_event);
inotify_free_event_priv(fsn_event_priv);
}
skip_send_ignore:
/* matches the reference taken when the event was created */
fsnotify_put_event(ignored_event);
/* remove this mark from the idr */
inotify_remove_from_idr(group, i_mark);
atomic_dec(&group->inotify_data.user->inotify_watches);
}
/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
struct inotify_inode_mark *i_mark;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}
static int inotify_update_existing_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct fsnotify_mark *fsn_mark;
struct inotify_inode_mark *i_mark;
__u32 old_mask, new_mask;
__u32 mask;
int add = (arg & IN_MASK_ADD);
int ret;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
fsn_mark = fsnotify_find_inode_mark(group, inode);
if (!fsn_mark)
return -ENOENT;
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
spin_lock(&fsn_mark->lock);
old_mask = fsn_mark->mask;
if (add)
fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
else
fsnotify_set_mark_mask_locked(fsn_mark, mask);
new_mask = fsn_mark->mask;
spin_unlock(&fsn_mark->lock);
if (old_mask != new_mask) {
/* more bits in old than in new? */
int dropped = (old_mask & ~new_mask);
/* more bits in this fsn_mark than the inode's mask? */
int do_inode = (new_mask & ~inode->i_fsnotify_mask);
/* update the inode with this new fsn_mark */
if (dropped || do_inode)
fsnotify_recalc_inode_mask(inode);
}
/* return the wd */
ret = i_mark->wd;
/* match the get from fsnotify_find_mark() */
fsnotify_put_mark(fsn_mark);
return ret;
}
static int inotify_new_watch(struct fsnotify_group *group,
struct inode *inode,
u32 arg)
{
struct inotify_inode_mark *tmp_i_mark;
__u32 mask;
int ret;
struct idr *idr = &group->inotify_data.idr;
spinlock_t *idr_lock = &group->inotify_data.idr_lock;
/* don't allow invalid bits: we don't want flags set */
mask = inotify_arg_to_mask(arg);
if (unlikely(!(mask & IN_ALL_EVENTS)))
return -EINVAL;
tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
if (unlikely(!tmp_i_mark))
return -ENOMEM;
fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
tmp_i_mark->fsn_mark.mask = mask;
tmp_i_mark->wd = -1;
ret = -ENOSPC;
if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
goto out_err;
ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
tmp_i_mark);
if (ret)
goto out_err;
/* we are on the idr, now get on the inode */
ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
if (ret) {
/* we failed to get on the inode, get off the idr */
inotify_remove_from_idr(group, tmp_i_mark);
goto out_err;
}
/* increment the number of watches the user has */
atomic_inc(&group->inotify_data.user->inotify_watches);
/* return the watch descriptor for this new mark */
ret = tmp_i_mark->wd;
out_err:
/* match the ref from fsnotify_init_mark() */
fsnotify_put_mark(&tmp_i_mark->fsn_mark);
return ret;
}
static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
int ret = 0;
retry:
/* try to update an existing watch with the new arg */
ret = inotify_update_existing_watch(group, inode, arg);
/* no mark present, try to add a new one */
if (ret == -ENOENT)
ret = inotify_new_watch(group, inode, arg);
/*
* inotify_new_watch could race with another thread which did an
* inotify_new_watch between the update_existing and the add watch
* here. Go back and try to update an existing mark again.
*/
if (ret == -EEXIST)
goto retry;
return ret;
}
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
struct fsnotify_group *group;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
group->inotify_data.last_wd = 0;
group->inotify_data.fa = NULL;
group->inotify_data.user = get_current_user();
if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
inotify_max_user_instances) {
fsnotify_put_group(group);
return ERR_PTR(-EMFILE);
}
return group;
}
/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
struct fsnotify_group *group;
int ret;
/* Check the IN_* constants for consistency. */
BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);
if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
return -EINVAL;
/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */
group = inotify_new_group(inotify_max_queued_events);
if (IS_ERR(group))
return PTR_ERR(group);
ret = anon_inode_getfd("inotify", &inotify_fops, group,
O_RDONLY | flags);
if (ret < 0)
fsnotify_put_group(group);
return ret;
}
SYSCALL_DEFINE0(inotify_init)
{
return sys_inotify_init1(0);
}
SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
u32, mask)
{
struct fsnotify_group *group;
struct inode *inode;
struct path path;
struct file *filp;
int ret, fput_needed;
unsigned flags = 0;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
if (unlikely(filp->f_op != &inotify_fops)) {
ret = -EINVAL;
goto fput_and_out;
}
if (!(mask & IN_DONT_FOLLOW))
flags |= LOOKUP_FOLLOW;
if (mask & IN_ONLYDIR)
flags |= LOOKUP_DIRECTORY;
ret = inotify_find_inode(pathname, &path, flags);
if (ret)
goto fput_and_out;
/* inode held in place by reference to path; group by fget on fd */
inode = path.dentry->d_inode;
group = filp->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
path_put(&path);
fput_and_out:
fput_light(filp, fput_needed);
return ret;
}
SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
struct fsnotify_group *group;
struct inotify_inode_mark *i_mark;
struct file *filp;
int ret = 0, fput_needed;
filp = fget_light(fd, &fput_needed);
if (unlikely(!filp))
return -EBADF;
/* verify that this is indeed an inotify instance */
ret = -EINVAL;
if (unlikely(filp->f_op != &inotify_fops))
goto out;
group = filp->private_data;
ret = -EINVAL;
i_mark = inotify_idr_find(group, wd);
if (unlikely(!i_mark))
goto out;
ret = 0;
fsnotify_destroy_mark(&i_mark->fsn_mark);
/* match ref taken by inotify_idr_find */
fsnotify_put_mark(&i_mark->fsn_mark);
out:
fput_light(filp, fput_needed);
return ret;
}
/*
* inotify_user_setup - Our initialization function. Note that we cannot return
* an error because we have compiled-in VFS hooks. So an (unlikely) failure here
* must result in panic().
*/
static int __init inotify_user_setup(void)
{
BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
BUILD_BUG_ON(IN_OPEN != FS_OPEN);
BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
BUILD_BUG_ON(IN_CREATE != FS_CREATE);
BUILD_BUG_ON(IN_DELETE != FS_DELETE);
BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);
BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);
inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);
inotify_max_queued_events = 16384;
inotify_max_user_instances = 128;
inotify_max_user_watches = 8192;
return 0;
}
module_init(inotify_user_setup);
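/*
 * Editorial illustration (not part of this kernel file): a minimal,
 * self-contained userspace program exercising the syscalls defined
 * above. The watched path "/tmp" and the event mask are example
 * choices only. read() blocks until events arrive, and ev->len already
 * includes the zero padding added by copy_event_to_user(), so records
 * can be walked with sizeof(*ev) + ev->len.
 *
 * #include <stdio.h>
 * #include <unistd.h>
 * #include <sys/inotify.h>
 *
 * int main(void)
 * {
 * char buf[4096], *p;
 * ssize_t len;
 * int fd = inotify_init1(IN_CLOEXEC);
 * if (fd < 0 ||
 *     inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
 * perror("inotify");
 * return 1;
 * }
 * len = read(fd, buf, sizeof(buf));
 * for (p = buf; len > 0 && p < buf + len; ) {
 * struct inotify_event *ev = (struct inotify_event *)p;
 * printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
 *        ev->len ? ev->name : "");
 * p += sizeof(*ev) + ev->len;
 * }
 * return 0;
 * }
 */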
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3449_1 |
crossvul-cpp_data_bad_5366_1 | /* Copyright 2015 greenbytes GmbH (https://www.greenbytes.de)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <assert.h>
#include <stddef.h>
#include <apr_thread_cond.h>
#include <apr_base64.h>
#include <apr_strings.h>
#include <ap_mpm.h>
#include <httpd.h>
#include <http_core.h>
#include <http_config.h>
#include <http_log.h>
#include <scoreboard.h>
#include "h2_private.h"
#include "h2.h"
#include "h2_bucket_eoc.h"
#include "h2_bucket_eos.h"
#include "h2_config.h"
#include "h2_ctx.h"
#include "h2_filter.h"
#include "h2_h2.h"
#include "h2_mplx.h"
#include "h2_push.h"
#include "h2_request.h"
#include "h2_headers.h"
#include "h2_stream.h"
#include "h2_task.h"
#include "h2_session.h"
#include "h2_util.h"
#include "h2_version.h"
#include "h2_workers.h"
static int h2_session_status_from_apr_status(apr_status_t rv)
{
if (rv == APR_SUCCESS) {
return NGHTTP2_NO_ERROR;
}
else if (APR_STATUS_IS_EAGAIN(rv)) {
return NGHTTP2_ERR_WOULDBLOCK;
}
else if (APR_STATUS_IS_EOF(rv)) {
return NGHTTP2_ERR_EOF;
}
return NGHTTP2_ERR_PROTO;
}
static void update_window(void *ctx, int stream_id, apr_off_t bytes_read)
{
h2_session *session = (h2_session*)ctx;
nghttp2_session_consume(session->ngh2, stream_id, bytes_read);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session(%ld-%d): consumed %ld bytes",
session->id, stream_id, (long)bytes_read);
}
static apr_status_t h2_session_receive(void *ctx,
const char *data, apr_size_t len,
apr_size_t *readlen);
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int err, const char *msg);
apr_status_t h2_session_stream_done(h2_session *session, h2_stream *stream)
{
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): EOS bucket cleanup -> done",
session->id, stream->id);
h2_mplx_stream_done(session->mplx, stream);
dispatch_event(session, H2_SESSION_EV_STREAM_DONE, 0, NULL);
return APR_SUCCESS;
}
typedef struct stream_sel_ctx {
h2_session *session;
h2_stream *candidate;
} stream_sel_ctx;
static int find_cleanup_stream(h2_stream *stream, void *ictx)
{
stream_sel_ctx *ctx = ictx;
if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
if (!ctx->session->local.accepting
&& stream->id > ctx->session->local.accepted_max) {
ctx->candidate = stream;
return 0;
}
}
else {
if (!ctx->session->remote.accepting
&& stream->id > ctx->session->remote.accepted_max) {
ctx->candidate = stream;
return 0;
}
}
return 1;
}
static void cleanup_streams(h2_session *session)
{
stream_sel_ctx ctx;
ctx.session = session;
ctx.candidate = NULL;
while (1) {
h2_mplx_stream_do(session->mplx, find_cleanup_stream, &ctx);
if (ctx.candidate) {
h2_session_stream_done(session, ctx.candidate);
ctx.candidate = NULL;
}
else {
break;
}
}
}
h2_stream *h2_session_open_stream(h2_session *session, int stream_id,
int initiated_on, const h2_request *req)
{
h2_stream * stream;
apr_pool_t *stream_pool;
apr_pool_create(&stream_pool, session->pool);
apr_pool_tag(stream_pool, "h2_stream");
stream = h2_stream_open(stream_id, stream_pool, session,
initiated_on);
nghttp2_session_set_stream_user_data(session->ngh2, stream_id, stream);
if (req) {
h2_stream_set_request(stream, req);
}
if (H2_STREAM_CLIENT_INITIATED(stream_id)) {
if (stream_id > session->remote.emitted_max) {
++session->remote.emitted_count;
session->remote.emitted_max = stream->id;
session->local.accepted_max = stream->id;
}
}
else {
if (stream_id > session->local.emitted_max) {
++session->local.emitted_count;
session->remote.emitted_max = stream->id;
}
}
dispatch_event(session, H2_SESSION_EV_STREAM_OPEN, 0, NULL);
return stream;
}
/**
* Determine the importance of streams when scheduling tasks.
* - if both streams depend on the same one, compare weights
* - if one stream is closer to the root, prioritize that one
* - if both are on the same level, use the weight of their root
* level ancestors
*/
static int spri_cmp(int sid1, nghttp2_stream *s1,
int sid2, nghttp2_stream *s2, h2_session *session)
{
nghttp2_stream *p1, *p2;
p1 = nghttp2_stream_get_parent(s1);
p2 = nghttp2_stream_get_parent(s2);
if (p1 == p2) {
int32_t w1, w2;
w1 = nghttp2_stream_get_weight(s1);
w2 = nghttp2_stream_get_weight(s2);
return w2 - w1;
}
else if (!p1) {
/* stream 1 closer to root */
return -1;
}
else if (!p2) {
/* stream 2 closer to root */
return 1;
}
return spri_cmp(sid1, p1, sid2, p2, session);
}
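/*
 * Worked example (editorial illustration, made-up ids and weights):
 * for two streams with the same parent and weights 16 and 8, the
 * p1 == p2 branch returns 8 - 16 == -8, ranking the heavier stream
 * first. With different parents the recursion climbs toward the root,
 * and the stream whose ancestry ends first (!p1 or !p2) wins with
 * -1 or 1.
 */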
static int stream_pri_cmp(int sid1, int sid2, void *ctx)
{
h2_session *session = ctx;
nghttp2_stream *s1, *s2;
s1 = nghttp2_session_find_stream(session->ngh2, sid1);
s2 = nghttp2_session_find_stream(session->ngh2, sid2);
if (s1 == s2) {
return 0;
}
else if (!s1) {
return 1;
}
else if (!s2) {
return -1;
}
return spri_cmp(sid1, s1, sid2, s2, session);
}
static apr_status_t stream_schedule(h2_session *session,
h2_stream *stream, int eos)
{
(void)session;
return h2_stream_schedule(stream, eos, h2_session_push_enabled(session),
stream_pri_cmp, session);
}
/*
* Callback when nghttp2 wants to send bytes back to the client.
*/
static ssize_t send_cb(nghttp2_session *ngh2,
const uint8_t *data, size_t length,
int flags, void *userp)
{
h2_session *session = (h2_session *)userp;
apr_status_t status;
(void)ngh2;
(void)flags;
status = h2_conn_io_write(&session->io, (const char *)data, length);
if (status == APR_SUCCESS) {
return length;
}
if (APR_STATUS_IS_EAGAIN(status)) {
return NGHTTP2_ERR_WOULDBLOCK;
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03062)
"h2_session: send error");
return h2_session_status_from_apr_status(status);
}
static int on_invalid_frame_recv_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
int error, void *userp)
{
h2_session *session = (h2_session *)userp;
(void)ngh2;
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03063)
"h2_session(%ld): recv invalid FRAME[%s], frames=%ld/%ld (r/s)",
session->id, buffer, (long)session->frames_received,
(long)session->frames_sent);
}
return 0;
}
static h2_stream *get_stream(h2_session *session, int stream_id)
{
return nghttp2_session_get_stream_user_data(session->ngh2, stream_id);
}
static int on_data_chunk_recv_cb(nghttp2_session *ngh2, uint8_t flags,
int32_t stream_id,
const uint8_t *data, size_t len, void *userp)
{
h2_session *session = (h2_session *)userp;
apr_status_t status = APR_SUCCESS;
h2_stream * stream;
int rv;
(void)flags;
stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03064)
"h2_stream(%ld-%d): on_data_chunk for unknown stream",
session->id, (int)stream_id);
rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
NGHTTP2_INTERNAL_ERROR);
if (nghttp2_is_fatal(rv)) {
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
return 0;
}
/* FIXME: enabling setting EOS this way seems to break input handling
* in mod_proxy_http2. why? */
status = h2_stream_write_data(stream, (const char *)data, len,
0 /*flags & NGHTTP2_FLAG_END_STREAM*/);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): data_chunk_recv, written %ld bytes",
session->id, stream_id, (long)len);
if (status != APR_SUCCESS) {
update_window(session, stream_id, len);
rv = nghttp2_submit_rst_stream(ngh2, NGHTTP2_FLAG_NONE, stream_id,
H2_STREAM_RST(stream, H2_ERR_INTERNAL_ERROR));
if (nghttp2_is_fatal(rv)) {
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
}
return 0;
}
static apr_status_t stream_release(h2_session *session,
h2_stream *stream,
uint32_t error_code)
{
conn_rec *c = session->c;
apr_bucket *b;
apr_status_t status;
if (!error_code) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, c,
"h2_stream(%ld-%d): handled, closing",
session->id, (int)stream->id);
if (H2_STREAM_CLIENT_INITIATED(stream->id)) {
if (stream->id > session->local.completed_max) {
session->local.completed_max = stream->id;
}
}
}
else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03065)
"h2_stream(%ld-%d): closing with err=%d %s",
session->id, (int)stream->id, (int)error_code,
h2_h2_err_description(error_code));
h2_stream_rst(stream, error_code);
}
b = h2_bucket_eos_create(c->bucket_alloc, stream);
APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
status = h2_conn_io_pass(&session->io, session->bbtmp);
apr_brigade_cleanup(session->bbtmp);
return status;
}
static int on_stream_close_cb(nghttp2_session *ngh2, int32_t stream_id,
uint32_t error_code, void *userp)
{
h2_session *session = (h2_session *)userp;
h2_stream *stream;
(void)ngh2;
stream = get_stream(session, stream_id);
if (stream) {
stream_release(session, stream, error_code);
}
return 0;
}
static int on_begin_headers_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame, void *userp)
{
h2_session *session = (h2_session *)userp;
h2_stream *s;
/* We may see HEADERS at the start of a stream or, after all DATA
* frames, carrying trailers. */
(void)ngh2;
s = get_stream(session, frame->hd.stream_id);
if (s) {
/* nop */
}
else {
s = h2_session_open_stream(userp, frame->hd.stream_id, 0, NULL);
}
return s? 0 : NGHTTP2_ERR_START_STREAM_NOT_ALLOWED;
}
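/* Note: returning NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE from the header
* callback makes nghttp2 fail only the affected stream (it gets reset),
* not the whole session. */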
static int on_header_cb(nghttp2_session *ngh2, const nghttp2_frame *frame,
const uint8_t *name, size_t namelen,
const uint8_t *value, size_t valuelen,
uint8_t flags,
void *userp)
{
h2_session *session = (h2_session *)userp;
h2_stream * stream;
apr_status_t status;
(void)flags;
stream = get_stream(session, frame->hd.stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02920)
"h2_session: stream(%ld-%d): on_header unknown stream",
session->id, (int)frame->hd.stream_id);
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
status = h2_stream_add_header(stream, (const char *)name, namelen,
(const char *)value, valuelen);
if (status != APR_SUCCESS && !h2_stream_is_ready(stream)) {
return NGHTTP2_ERR_TEMPORAL_CALLBACK_FAILURE;
}
return 0;
}
/**
* The nghttp2 session has received a complete frame. Most frames it
* processes internally; HEADERS and DATA frames, however, we need to
* handle ourselves.
*/
static int on_frame_recv_cb(nghttp2_session *ng2s,
const nghttp2_frame *frame,
void *userp)
{
h2_session *session = (h2_session *)userp;
apr_status_t status = APR_SUCCESS;
h2_stream *stream;
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03066)
"h2_session(%ld): recv FRAME[%s], frames=%ld/%ld (r/s)",
session->id, buffer, (long)session->frames_received,
(long)session->frames_sent);
}
++session->frames_received;
switch (frame->hd.type) {
case NGHTTP2_HEADERS:
/* This can be HEADERS for a new stream, defining the request,
* or HEADER may come after DATA at the end of a stream as in
* trailers */
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
if (h2_stream_is_scheduled(stream)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): TRAILER, eos=%d",
session->id, frame->hd.stream_id, eos);
if (eos) {
status = h2_stream_close_input(stream);
}
}
else {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): HEADER, eos=%d",
session->id, frame->hd.stream_id, eos);
status = stream_schedule(session, stream, eos);
}
}
else {
status = APR_EINVAL;
}
break;
case NGHTTP2_DATA:
stream = get_stream(session, frame->hd.stream_id);
if (stream) {
int eos = (frame->hd.flags & NGHTTP2_FLAG_END_STREAM);
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): DATA, len=%ld, eos=%d",
session->id, frame->hd.stream_id,
(long)frame->hd.length, eos);
if (eos) {
status = h2_stream_close_input(stream);
}
}
else {
status = APR_EINVAL;
}
break;
case NGHTTP2_PRIORITY:
session->reprioritize = 1;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session: stream(%ld-%d): PRIORITY frame "
" weight=%d, dependsOn=%d, exclusive=%d",
session->id, (int)frame->hd.stream_id,
frame->priority.pri_spec.weight,
frame->priority.pri_spec.stream_id,
frame->priority.pri_spec.exclusive);
break;
case NGHTTP2_WINDOW_UPDATE:
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session: stream(%ld-%d): WINDOW_UPDATE "
"incr=%d",
session->id, (int)frame->hd.stream_id,
frame->window_update.window_size_increment);
break;
case NGHTTP2_RST_STREAM:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03067)
"h2_session(%ld-%d): RST_STREAM by client, errror=%d",
session->id, (int)frame->hd.stream_id,
(int)frame->rst_stream.error_code);
stream = get_stream(session, frame->hd.stream_id);
if (stream && stream->initiated_on) {
++session->pushes_reset;
}
else {
++session->streams_reset;
}
break;
case NGHTTP2_GOAWAY:
if (frame->goaway.error_code == 0
&& frame->goaway.last_stream_id == ((1u << 31) - 1)) {
/* Graceful shutdown notice: error code 0 with the maximum possible
* last_stream_id (2^31 - 1). Such a notice should not normally come
* from a client... */
session->remote.accepting = 0;
}
else {
session->remote.accepted_max = frame->goaway.last_stream_id;
dispatch_event(session, H2_SESSION_EV_REMOTE_GOAWAY,
frame->goaway.error_code, NULL);
}
break;
default:
if (APLOGctrace2(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer,
sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session: on_frame_rcv %s", buffer);
}
break;
}
if (status != APR_SUCCESS) {
int rv;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
APLOGNO(02923)
"h2_session: stream(%ld-%d): error handling frame",
session->id, (int)frame->hd.stream_id);
rv = nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
frame->hd.stream_id,
NGHTTP2_INTERNAL_ERROR);
if (nghttp2_is_fatal(rv)) {
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
}
return 0;
}
static char immortal_zeros[H2_MAX_PADLEN];
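/* Callback used with NGHTTP2_DATA_FLAG_NO_COPY: nghttp2 hands us the
* serialized 9 byte frame header and asks us to write the DATA frame
* ourselves. The on-the-wire layout assembled below is:
* [9 byte frame header][1 byte pad length, if padded]
* [length payload bytes from the stream][padlen zero bytes]
* immortal_zeros stays zeroed for the process lifetime, so the padding
* can be sent via immortal buckets without copying. */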
static int on_send_data_cb(nghttp2_session *ngh2,
nghttp2_frame *frame,
const uint8_t *framehd,
size_t length,
nghttp2_data_source *source,
void *userp)
{
apr_status_t status = APR_SUCCESS;
h2_session *session = (h2_session *)userp;
int stream_id = (int)frame->hd.stream_id;
unsigned char padlen;
int eos;
h2_stream *stream;
apr_bucket *b;
apr_off_t len = length;
(void)ngh2;
(void)source;
if (frame->data.padlen > H2_MAX_PADLEN) {
return NGHTTP2_ERR_PROTO;
}
padlen = (unsigned char)frame->data.padlen;
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): send_data_cb for %ld bytes",
session->id, (int)stream_id, (long)length);
stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_NOTFOUND, session->c,
APLOGNO(02924)
"h2_stream(%ld-%d): send_data, lookup stream",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
status = h2_conn_io_write(&session->io, (const char *)framehd, 9);
if (padlen && status == APR_SUCCESS) {
status = h2_conn_io_write(&session->io, (const char *)&padlen, 1);
}
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): writing frame header",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
status = h2_stream_read_to(stream, session->bbtmp, &len, &eos);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): send_data_cb, reading stream",
session->id, (int)stream_id);
apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
else if (len != length) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): send_data_cb, wanted %ld bytes, "
"got %ld from stream",
session->id, (int)stream_id, (long)length, (long)len);
apr_brigade_cleanup(session->bbtmp);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
if (padlen) {
b = apr_bucket_immortal_create(immortal_zeros, padlen,
session->c->bucket_alloc);
APR_BRIGADE_INSERT_TAIL(session->bbtmp, b);
}
status = h2_conn_io_pass(&session->io, session->bbtmp);
apr_brigade_cleanup(session->bbtmp);
if (status == APR_SUCCESS) {
stream->out_data_frames++;
stream->out_data_octets += length;
return 0;
}
else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c,
APLOGNO(02925)
"h2_stream(%ld-%d): failed send_data_cb",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
}
static int on_frame_send_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
void *user_data)
{
h2_session *session = user_data;
if (APLOGcdebug(session->c)) {
char buffer[256];
h2_util_frame_print(frame, buffer, sizeof(buffer)/sizeof(buffer[0]));
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03068)
"h2_session(%ld): sent FRAME[%s], frames=%ld/%ld (r/s)",
session->id, buffer, (long)session->frames_received,
(long)session->frames_sent);
}
++session->frames_sent;
return 0;
}
#ifdef H2_NG2_INVALID_HEADER_CB
static int on_invalid_header_cb(nghttp2_session *ngh2,
const nghttp2_frame *frame,
const uint8_t *name, size_t namelen,
const uint8_t *value, size_t valuelen,
uint8_t flags, void *user_data)
{
h2_session *session = user_data;
if (APLOGcdebug(session->c)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03456)
"h2_session(%ld-%d): denying stream with invalid header "
"'%s: %s'", session->id, (int)frame->hd.stream_id,
apr_pstrndup(session->pool, (const char *)name, namelen),
apr_pstrndup(session->pool, (const char *)value, valuelen));
}
return nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
frame->hd.stream_id,
NGHTTP2_PROTOCOL_ERROR);
}
#endif
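/* Convenience macro: NGH2_SET_CALLBACK(cb, send, send_cb) expands to
* nghttp2_session_callbacks_set_send_callback(cb, send_cb). */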
#define NGH2_SET_CALLBACK(callbacks, name, fn)\
nghttp2_session_callbacks_set_##name##_callback(callbacks, fn)
static apr_status_t init_callbacks(conn_rec *c, nghttp2_session_callbacks **pcb)
{
int rv = nghttp2_session_callbacks_new(pcb);
if (rv != 0) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c,
APLOGNO(02926) "nghttp2_session_callbacks_new: %s",
nghttp2_strerror(rv));
return APR_EGENERAL;
}
NGH2_SET_CALLBACK(*pcb, send, send_cb);
NGH2_SET_CALLBACK(*pcb, on_frame_recv, on_frame_recv_cb);
NGH2_SET_CALLBACK(*pcb, on_invalid_frame_recv, on_invalid_frame_recv_cb);
NGH2_SET_CALLBACK(*pcb, on_data_chunk_recv, on_data_chunk_recv_cb);
NGH2_SET_CALLBACK(*pcb, on_stream_close, on_stream_close_cb);
NGH2_SET_CALLBACK(*pcb, on_begin_headers, on_begin_headers_cb);
NGH2_SET_CALLBACK(*pcb, on_header, on_header_cb);
NGH2_SET_CALLBACK(*pcb, send_data, on_send_data_cb);
NGH2_SET_CALLBACK(*pcb, on_frame_send, on_frame_send_cb);
#ifdef H2_NG2_INVALID_HEADER_CB
NGH2_SET_CALLBACK(*pcb, on_invalid_header, on_invalid_header_cb);
#endif
return APR_SUCCESS;
}
static void h2_session_destroy(h2_session *session)
{
ap_assert(session);
if (session->mplx) {
h2_mplx_set_consumed_cb(session->mplx, NULL, NULL);
h2_mplx_release_and_join(session->mplx, session->iowait);
session->mplx = NULL;
}
ap_remove_input_filter_byhandle((session->r? session->r->input_filters :
session->c->input_filters), "H2_IN");
if (session->ngh2) {
nghttp2_session_del(session->ngh2);
session->ngh2 = NULL;
}
if (session->c) {
h2_ctx_clear(session->c);
}
if (APLOGctrace1(session->c)) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_session(%ld): destroy", session->id);
}
if (session->pool) {
apr_pool_destroy(session->pool);
}
}
static apr_status_t h2_session_shutdown_notice(h2_session *session)
{
apr_status_t status;
ap_assert(session);
if (!session->local.accepting) {
return APR_SUCCESS;
}
nghttp2_submit_shutdown_notice(session->ngh2);
session->local.accepting = 0;
status = nghttp2_session_send(session->ngh2);
if (status == APR_SUCCESS) {
status = h2_conn_io_flush(&session->io);
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03457)
"session(%ld): sent shutdown notice", session->id);
return status;
}
static apr_status_t h2_session_shutdown(h2_session *session, int error,
const char *msg, int force_close)
{
apr_status_t status = APR_SUCCESS;
ap_assert(session);
if (session->local.shutdown) {
return APR_SUCCESS;
}
if (!msg && error) {
msg = nghttp2_strerror(error);
}
if (error || force_close) {
/* Not a graceful shutdown, we want to leave...
* Do not start further streams that are waiting to be scheduled.
* Find out the max stream id that we have processed or
* are still actively working on.
* Remove all streams greater than this number without submitting
* a RST_STREAM frame, since that should be clear from the GOAWAY
* we send. */
session->local.accepted_max = h2_mplx_shutdown(session->mplx);
session->local.error = error;
}
else {
/* Graceful shutdown: we will continue processing all streams
* we have, but no longer accept new ones. Report the max stream
* id we have received and discard all newer ones. */
}
nghttp2_submit_goaway(session->ngh2, NGHTTP2_FLAG_NONE,
session->local.accepted_max,
error, (uint8_t*)msg, msg? strlen(msg):0);
session->local.accepting = 0;
session->local.shutdown = 1;
status = nghttp2_session_send(session->ngh2);
if (status == APR_SUCCESS) {
status = h2_conn_io_flush(&session->io);
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03069)
"session(%ld): sent GOAWAY, err=%d, msg=%s",
session->id, error, msg? msg : "");
dispatch_event(session, H2_SESSION_EV_LOCAL_GOAWAY, error, msg);
if (force_close) {
apr_brigade_cleanup(session->bbtmp);
h2_mplx_abort(session->mplx);
}
return status;
}
static apr_status_t session_pool_cleanup(void *data)
{
h2_session *session = data;
/* On a controlled connection shutdown, this never gets
* called, as we deregister and destroy our pool manually.
* However, when we have an async mpm and have handed it our
* idle connection, the mpm will just clean up once the connection
* is closed from the other side (and sometimes even from our side),
* and then we arrive here.
*/
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"session(%ld): pool_cleanup", session->id);
if (session->state != H2_SESSION_ST_DONE) {
/* Not good. The connection is being torn down and we have
* not sent a goaway. This is considered a protocol error and
* the client has to assume that any streams "in flight" may have
* been processed and are not safe to retry.
* Since clients with an idle connection may only learn about a closed
* connection when sending the next request, the effect is that at
* least this one request will fail.
*/
ap_log_cerror(APLOG_MARK, APLOG_WARNING, 0, session->c, APLOGNO(03199)
"session(%ld): connection disappeared without proper "
"goodbye, clients will be confused, should not happen",
session->id);
}
/* keep us from destroying the pool, since that is already ongoing. */
session->pool = NULL;
h2_session_destroy(session);
return APR_SUCCESS;
}
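/* Trace-level allocator hooks. When TRACE6 logging is enabled, these are
* plugged into nghttp2 via an nghttp2_mem struct (see
* h2_session_create_int), so every allocation nghttp2 makes shows up in
* the log. */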
static void *session_malloc(size_t size, void *ctx)
{
h2_session *session = ctx;
ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
"h2_session(%ld): malloc(%ld)",
session->id, (long)size);
return malloc(size);
}
static void session_free(void *p, void *ctx)
{
h2_session *session = ctx;
ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
"h2_session(%ld): free()",
session->id);
free(p);
}
static void *session_calloc(size_t n, size_t size, void *ctx)
{
h2_session *session = ctx;
ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
"h2_session(%ld): calloc(%ld, %ld)",
session->id, (long)n, (long)size);
return calloc(n, size);
}
static void *session_realloc(void *p, size_t size, void *ctx)
{
h2_session *session = ctx;
ap_log_cerror(APLOG_MARK, APLOG_TRACE6, 0, session->c,
"h2_session(%ld): realloc(%ld)",
session->id, (long)size);
return realloc(p, size);
}
static h2_session *h2_session_create_int(conn_rec *c,
request_rec *r,
h2_ctx *ctx,
h2_workers *workers)
{
nghttp2_session_callbacks *callbacks = NULL;
nghttp2_option *options = NULL;
uint32_t n;
apr_pool_t *pool = NULL;
apr_status_t status = apr_pool_create(&pool, c->pool);
h2_session *session;
if (status != APR_SUCCESS) {
return NULL;
}
apr_pool_tag(pool, "h2_session");
session = apr_pcalloc(pool, sizeof(h2_session));
if (session) {
int rv;
nghttp2_mem *mem;
session->id = c->id;
session->c = c;
session->r = r;
session->s = h2_ctx_server_get(ctx);
session->pool = pool;
session->config = h2_config_sget(session->s);
session->workers = workers;
session->state = H2_SESSION_ST_INIT;
session->local.accepting = 1;
session->remote.accepting = 1;
apr_pool_pre_cleanup_register(pool, session, session_pool_cleanup);
session->max_stream_count = h2_config_geti(session->config,
H2_CONF_MAX_STREAMS);
session->max_stream_mem = h2_config_geti(session->config,
H2_CONF_STREAM_MAX_MEM);
status = apr_thread_cond_create(&session->iowait, session->pool);
if (status != APR_SUCCESS) {
return NULL;
}
session->mplx = h2_mplx_create(c, session->pool, session->config,
session->s->timeout, workers);
h2_mplx_set_consumed_cb(session->mplx, update_window, session);
/* Install the connection input filter that feeds the session */
session->cin = h2_filter_cin_create(session->pool,
h2_session_receive, session);
ap_add_input_filter("H2_IN", session->cin, r, c);
h2_conn_io_init(&session->io, c, session->config);
session->bbtmp = apr_brigade_create(session->pool, c->bucket_alloc);
status = init_callbacks(c, &callbacks);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02927)
"nghttp2: error in init_callbacks");
h2_session_destroy(session);
return NULL;
}
rv = nghttp2_option_new(&options);
if (rv != 0) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
APLOGNO(02928) "nghttp2_option_new: %s",
nghttp2_strerror(rv));
h2_session_destroy(session);
return NULL;
}
nghttp2_option_set_peer_max_concurrent_streams(
options, (uint32_t)session->max_stream_count);
/* We need to handle window updates ourselves, otherwise we
* get flooded by nghttp2. */
nghttp2_option_set_no_auto_window_update(options, 1);
if (APLOGctrace6(c)) {
mem = apr_pcalloc(session->pool, sizeof(nghttp2_mem));
mem->mem_user_data = session;
mem->malloc = session_malloc;
mem->free = session_free;
mem->calloc = session_calloc;
mem->realloc = session_realloc;
rv = nghttp2_session_server_new3(&session->ngh2, callbacks,
session, options, mem);
}
else {
rv = nghttp2_session_server_new2(&session->ngh2, callbacks,
session, options);
}
nghttp2_session_callbacks_del(callbacks);
nghttp2_option_del(options);
if (rv != 0) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
APLOGNO(02929) "nghttp2_session_server_new: %s",
nghttp2_strerror(rv));
h2_session_destroy(session);
return NULL;
}
n = h2_config_geti(session->config, H2_CONF_PUSH_DIARY_SIZE);
session->push_diary = h2_push_diary_create(session->pool, n);
if (APLOGcdebug(c)) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(03200)
"h2_session(%ld) created, max_streams=%d, "
"stream_mem=%d, workers_limit=%d, workers_max=%d, "
"push_diary(type=%d,N=%d)",
session->id, (int)session->max_stream_count,
(int)session->max_stream_mem,
session->mplx->workers_limit,
session->mplx->workers_max,
session->push_diary->dtype,
(int)session->push_diary->N);
}
}
return session;
}
h2_session *h2_session_create(conn_rec *c, h2_ctx *ctx, h2_workers *workers)
{
return h2_session_create_int(c, NULL, ctx, workers);
}
h2_session *h2_session_rcreate(request_rec *r, h2_ctx *ctx, h2_workers *workers)
{
return h2_session_create_int(r->connection, r, ctx, workers);
}
void h2_session_eoc_callback(h2_session *session)
{
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"session(%ld): cleanup and destroy", session->id);
apr_pool_cleanup_kill(session->pool, session, session_pool_cleanup);
h2_session_destroy(session);
}
static apr_status_t h2_session_start(h2_session *session, int *rv)
{
apr_status_t status = APR_SUCCESS;
nghttp2_settings_entry settings[3];
size_t slen;
int win_size;
ap_assert(session);
/* Start the conversation by submitting our SETTINGS frame */
*rv = 0;
if (session->r) {
const char *s, *cs;
apr_size_t dlen;
h2_stream * stream;
/* 'h2c' mode: we should have a 'HTTP2-Settings' header with
* base64 encoded client settings. */
s = apr_table_get(session->r->headers_in, "HTTP2-Settings");
if (!s) {
ap_log_rerror(APLOG_MARK, APLOG_ERR, APR_EINVAL, session->r,
APLOGNO(02931)
"HTTP2-Settings header missing in request");
return APR_EINVAL;
}
cs = NULL;
dlen = h2_util_base64url_decode(&cs, s, session->pool);
if (APLOGrdebug(session->r)) {
char buffer[128];
h2_util_hex_dump(buffer, 128, (char*)cs, dlen);
ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, session->r, APLOGNO(03070)
"upgrading h2c session with HTTP2-Settings: %s -> %s (%d)",
s, buffer, (int)dlen);
}
*rv = nghttp2_session_upgrade(session->ngh2, (uint8_t*)cs, dlen, NULL);
if (*rv != 0) {
status = APR_EINVAL;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
APLOGNO(02932) "nghttp2_session_upgrade: %s",
nghttp2_strerror(*rv));
return status;
}
/* Now we need to auto-open stream 1 for the request we got. */
stream = h2_session_open_stream(session, 1, 0, NULL);
if (!stream) {
status = APR_EGENERAL;
ap_log_rerror(APLOG_MARK, APLOG_ERR, status, session->r,
APLOGNO(02933) "open stream 1: %s",
nghttp2_strerror(*rv));
return status;
}
status = h2_stream_set_request_rec(stream, session->r);
if (status != APR_SUCCESS) {
return status;
}
status = stream_schedule(session, stream, 1);
if (status != APR_SUCCESS) {
return status;
}
}
slen = 0;
settings[slen].settings_id = NGHTTP2_SETTINGS_MAX_CONCURRENT_STREAMS;
settings[slen].value = (uint32_t)session->max_stream_count;
++slen;
win_size = h2_config_geti(session->config, H2_CONF_WIN_SIZE);
if (win_size != H2_INITIAL_WINDOW_SIZE) {
settings[slen].settings_id = NGHTTP2_SETTINGS_INITIAL_WINDOW_SIZE;
settings[slen].value = win_size;
++slen;
}
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, session->c, APLOGNO(03201)
"h2_session(%ld): start, INITIAL_WINDOW_SIZE=%ld, "
"MAX_CONCURRENT_STREAMS=%d",
session->id, (long)win_size, (int)session->max_stream_count);
*rv = nghttp2_submit_settings(session->ngh2, NGHTTP2_FLAG_NONE,
settings, slen);
if (*rv != 0) {
status = APR_EGENERAL;
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
APLOGNO(02935) "nghttp2_submit_settings: %s",
nghttp2_strerror(*rv));
}
else {
/* Use the maximum possible value for the connection window size. We are
* only interested in per-stream flow control, which has the initial
* window size configured above.
* Therefore, for our use, the connection window can only get in the
* way. Example: if we allow 100 streams with a 32KB window each, we
* buffer up to 3.2 MB of data. Unless we do separate connection window
* interim updates, any smaller connection window will lead to blocking
* in DATA flow.
*/
*rv = nghttp2_submit_window_update(session->ngh2, NGHTTP2_FLAG_NONE,
0, NGHTTP2_MAX_WINDOW_SIZE - win_size);
if (*rv != 0) {
status = APR_EGENERAL;
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
APLOGNO(02970) "nghttp2_submit_window_update: %s",
nghttp2_strerror(*rv));
}
}
return status;
}
static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
h2_headers *headers, apr_off_t len,
int eos);
static ssize_t stream_data_cb(nghttp2_session *ng2s,
int32_t stream_id,
uint8_t *buf,
size_t length,
uint32_t *data_flags,
nghttp2_data_source *source,
void *puser)
{
h2_session *session = (h2_session *)puser;
apr_off_t nread = length;
int eos = 0;
apr_status_t status;
h2_stream *stream;
ap_assert(session);
/* The session wants to send more DATA for the stream. We need
* to find out how much of the requested length we can send without
* blocking.
* Indicate EOS when we encounter it or DEFERRED if the stream
* should be suspended. Beware of trailers.
*/
(void)ng2s;
(void)buf;
(void)source;
stream = get_stream(session, stream_id);
if (!stream) {
ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, session->c,
APLOGNO(02937)
"h2_stream(%ld-%d): data requested but stream not found",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
status = h2_stream_out_prepare(stream, &nread, &eos, NULL);
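/* With NGHTTP2_DATA_FLAG_NO_COPY set, nghttp2 does not copy the payload
* into buf; it invokes on_send_data_cb instead, where we write the frame
* directly from the stream's buckets. */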
if (nread) {
*data_flags |= NGHTTP2_DATA_FLAG_NO_COPY;
}
switch (status) {
case APR_SUCCESS:
break;
case APR_ECONNRESET:
return nghttp2_submit_rst_stream(ng2s, NGHTTP2_FLAG_NONE,
stream->id, stream->rst_error);
case APR_EAGAIN:
/* If there is no data available, nghttp2 will automatically
* suspend this stream and not ask for more data until we
* resume it via nghttp2_session_resume_data().
*/
nread = 0;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03071)
"h2_stream(%ld-%d): suspending",
session->id, (int)stream_id);
return NGHTTP2_ERR_DEFERRED;
default:
nread = 0;
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
APLOGNO(02938) "h2_stream(%ld-%d): reading data",
session->id, (int)stream_id);
return NGHTTP2_ERR_CALLBACK_FAILURE;
}
if (eos) {
*data_flags |= NGHTTP2_DATA_FLAG_EOF;
}
return (ssize_t)nread;
}
struct h2_stream *h2_session_push(h2_session *session, h2_stream *is,
h2_push *push)
{
apr_status_t status;
h2_stream *stream;
h2_ngheader *ngh;
int nid;
ngh = h2_util_ngheader_make_req(is->pool, push->req);
nid = nghttp2_submit_push_promise(session->ngh2, 0, is->id,
ngh->nv, ngh->nvlen, NULL);
if (nid <= 0) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03075)
"h2_stream(%ld-%d): submitting push promise fail: %s",
session->id, is->id, nghttp2_strerror(nid));
return NULL;
}
++session->pushes_promised;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03076)
"h2_stream(%ld-%d): SERVER_PUSH %d for %s %s on %d",
session->id, is->id, nid,
push->req->method, push->req->path, is->id);
stream = h2_session_open_stream(session, nid, is->id, push->req);
if (stream) {
h2_session_set_prio(session, stream, push->priority);
status = stream_schedule(session, stream, 1);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, status, session->c,
"h2_stream(%ld-%d): scheduling push stream",
session->id, stream->id);
stream = NULL;
}
++session->unsent_promises;
}
else {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03077)
"h2_stream(%ld-%d): failed to create stream obj %d",
session->id, is->id, nid);
}
if (!stream) {
/* try to tell the client that it should not wait. */
nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE, nid,
NGHTTP2_INTERNAL_ERROR);
}
return stream;
}
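/* Clamp a computed weight into the valid nghttp2 range
* [NGHTTP2_MIN_WEIGHT, NGHTTP2_MAX_WEIGHT]. With the usual values of
* 1 and 256, valid_weight(0.5f) yields 1 and valid_weight(1000.0f)
* yields 256. */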
static int valid_weight(float f)
{
int w = (int)f;
return (w < NGHTTP2_MIN_WEIGHT? NGHTTP2_MIN_WEIGHT :
(w > NGHTTP2_MAX_WEIGHT)? NGHTTP2_MAX_WEIGHT : w);
}
apr_status_t h2_session_set_prio(h2_session *session, h2_stream *stream,
const h2_priority *prio)
{
apr_status_t status = APR_SUCCESS;
#ifdef H2_NG2_CHANGE_PRIO
nghttp2_stream *s_grandpa, *s_parent, *s;
if (prio == NULL) {
/* we treat this as a NOP */
return APR_SUCCESS;
}
s = nghttp2_session_find_stream(session->ngh2, stream->id);
if (!s) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_stream(%ld-%d): lookup of nghttp2_stream failed",
session->id, stream->id);
return APR_EINVAL;
}
s_parent = nghttp2_stream_get_parent(s);
if (s_parent) {
nghttp2_priority_spec ps;
int id_parent, id_grandpa, w_parent, w;
int rv = 0;
const char *ptype = "AFTER";
h2_dependency dep = prio->dependency;
id_parent = nghttp2_stream_get_stream_id(s_parent);
s_grandpa = nghttp2_stream_get_parent(s_parent);
if (s_grandpa) {
id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
}
else {
/* parent of parent does not exist,
* only possible if parent == root */
dep = H2_DEPENDANT_AFTER;
}
switch (dep) {
case H2_DEPENDANT_INTERLEAVED:
/* PUSHed stream is to be interleaved with initiating stream.
* It is made a sibling of the initiating stream and gets a
* proportional weight [1, MAX_WEIGHT] of the initiating
* stream weight.
*/
ptype = "INTERLEAVED";
w_parent = nghttp2_stream_get_weight(s_parent);
w = valid_weight(w_parent * ((float)prio->weight / NGHTTP2_MAX_WEIGHT));
nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
break;
case H2_DEPENDANT_BEFORE:
/* PUSHed stream is to be sent BEFORE the initiating stream.
* It gets the same weight as the initiating stream, replaces
* that stream in the dependency tree and has the initiating
* stream as child.
*/
ptype = "BEFORE";
w = w_parent = nghttp2_stream_get_weight(s_parent);
nghttp2_priority_spec_init(&ps, stream->id, w_parent, 0);
id_grandpa = nghttp2_stream_get_stream_id(s_grandpa);
rv = nghttp2_session_change_stream_priority(session->ngh2, id_parent, &ps);
if (rv < 0) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03202)
"h2_stream(%ld-%d): PUSH BEFORE, weight=%d, "
"depends=%d, returned=%d",
session->id, id_parent, ps.weight, ps.stream_id, rv);
return APR_EGENERAL;
}
nghttp2_priority_spec_init(&ps, id_grandpa, w, 0);
break;
case H2_DEPENDANT_AFTER:
/* The PUSHed stream is to be sent after the initiating stream.
* Give it the specified weight and let it depend on the initiating
* stream.
*/
/* fall through, it's the default */
default:
nghttp2_priority_spec_init(&ps, id_parent, valid_weight(prio->weight), 0);
break;
}
rv = nghttp2_session_change_stream_priority(session->ngh2, stream->id, &ps);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03203)
"h2_stream(%ld-%d): PUSH %s, weight=%d, "
"depends=%d, returned=%d",
session->id, stream->id, ptype,
ps.weight, ps.stream_id, rv);
status = (rv < 0)? APR_EGENERAL : APR_SUCCESS;
}
#else
(void)session;
(void)stream;
(void)prio;
(void)valid_weight;
#endif
return status;
}
int h2_session_push_enabled(h2_session *session)
{
/* push iff it is enabled in our config, the client has not sent us a
* GOAWAY, and the client's settings have push enabled */
return (session->remote.accepting /* no remote GOAWAY received */
&& h2_config_geti(session->config, H2_CONF_PUSH)
&& nghttp2_session_get_remote_settings(session->ngh2,
NGHTTP2_SETTINGS_ENABLE_PUSH));
}
static apr_status_t h2_session_send(h2_session *session)
{
apr_interval_time_t saved_timeout;
int rv;
apr_socket_t *socket;
socket = ap_get_conn_socket(session->c);
if (socket) {
apr_socket_timeout_get(socket, &saved_timeout);
apr_socket_timeout_set(socket, session->s->timeout);
}
rv = nghttp2_session_send(session->ngh2);
if (socket) {
apr_socket_timeout_set(socket, saved_timeout);
}
session->have_written = 1;
if (rv != 0) {
if (nghttp2_is_fatal(rv)) {
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
return APR_EGENERAL;
}
}
session->unsent_promises = 0;
session->unsent_submits = 0;
return APR_SUCCESS;
}
/**
* headers for the stream are ready.
*/
static apr_status_t on_stream_headers(h2_session *session, h2_stream *stream,
h2_headers *headers, apr_off_t len,
int eos)
{
apr_status_t status = APR_SUCCESS;
int rv = 0;
ap_assert(session);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): on_headers", session->id, stream->id);
if (headers->status < 100) {
int err = H2_STREAM_RST(stream, headers->status);
rv = nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
stream->id, err);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c,
"h2_stream(%ld-%d): unpexected header status %d, stream rst",
session->id, stream->id, headers->status);
goto leave;
}
else if (stream->has_response) {
h2_ngheader *nh;
nh = h2_util_ngheader_make(stream->pool, headers->headers);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03072)
"h2_stream(%ld-%d): submit %d trailers",
session->id, (int)stream->id,(int) nh->nvlen);
rv = nghttp2_submit_trailer(session->ngh2, stream->id, nh->nv, nh->nvlen);
goto leave;
}
else {
nghttp2_data_provider provider, *pprovider = NULL;
h2_ngheader *ngh;
apr_table_t *hout;
const char *note;
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03073)
"h2_stream(%ld-%d): submit response %d, REMOTE_WINDOW_SIZE=%u",
session->id, stream->id, headers->status,
(unsigned int)nghttp2_session_get_stream_remote_window_size(session->ngh2, stream->id));
if (!eos || len > 0) {
memset(&provider, 0, sizeof(provider));
provider.source.fd = stream->id;
provider.read_callback = stream_data_cb;
pprovider = &provider;
}
/* If this stream is not a pushed one itself,
* and HTTP/2 server push is enabled here,
* and the response HTTP status is not sth >= 400,
* and the remote side has pushing enabled,
* -> find and perform any pushes on this stream
* *before* we submit the stream response itself.
* This helps clients avoid opening new streams on Link
* headers that get pushed right afterwards.
*
* *) the response code is relevant, as we do not want to
* make pushes on 401 or 403 codes and friends.
* And if we see a 304, we do not push either, as the client,
* having this resource in its cache, most likely has the pushed
* ones cached as well.
*/
if (!stream->initiated_on
&& !stream->has_response
&& stream->request && stream->request->method
&& !strcmp("GET", stream->request->method)
&& (headers->status < 400)
&& (headers->status != 304)
&& h2_session_push_enabled(session)) {
h2_stream_submit_pushes(stream, headers);
}
if (!stream->pref_priority) {
stream->pref_priority = h2_stream_get_priority(stream, headers);
}
h2_session_set_prio(session, stream, stream->pref_priority);
hout = headers->headers;
note = apr_table_get(headers->notes, H2_FILTER_DEBUG_NOTE);
if (note && !strcmp("on", note)) {
int32_t connFlowIn, connFlowOut;
connFlowIn = nghttp2_session_get_effective_local_window_size(session->ngh2);
connFlowOut = nghttp2_session_get_remote_window_size(session->ngh2);
hout = apr_table_clone(stream->pool, hout);
apr_table_setn(hout, "conn-flow-in",
apr_itoa(stream->pool, connFlowIn));
apr_table_setn(hout, "conn-flow-out",
apr_itoa(stream->pool, connFlowOut));
}
if (headers->status == 103
&& !h2_config_geti(session->config, H2_CONF_EARLY_HINTS)) {
/* suppress sending this to the client, it might have triggered
* pushes and served its purpose nevertheless */
rv = 0;
goto leave;
}
ngh = h2_util_ngheader_make_res(stream->pool, headers->status, hout);
rv = nghttp2_submit_response(session->ngh2, stream->id,
ngh->nv, ngh->nvlen, pprovider);
stream->has_response = h2_headers_are_response(headers);
session->have_written = 1;
if (stream->initiated_on) {
++session->pushes_submitted;
}
else {
++session->responses_submitted;
}
}
leave:
if (nghttp2_is_fatal(rv)) {
status = APR_EGENERAL;
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, rv, nghttp2_strerror(rv));
ap_log_cerror(APLOG_MARK, APLOG_ERR, status, session->c,
APLOGNO(02940) "submit_response: %s",
nghttp2_strerror(rv));
}
++session->unsent_submits;
/* Unsent push promises are written immediately, as nghttp2
* 1.5.0 only creates its internal stream data structures on
* send, and we might need them for other submits.
* Also, to conserve memory, we send at least every 10 submits
* so that nghttp2 does not buffer all outbound items for too
* long.
*/
if (status == APR_SUCCESS
&& (session->unsent_promises || session->unsent_submits > 10)) {
status = h2_session_send(session);
}
return status;
}
/**
* A stream was resumed as new response/output data arrived.
*/
static apr_status_t on_stream_resume(void *ctx, h2_stream *stream)
{
h2_session *session = ctx;
apr_status_t status = APR_EAGAIN;
int rv;
apr_off_t len = 0;
int eos = 0;
h2_headers *headers;
ap_assert(stream);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_stream(%ld-%d): on_resume", session->id, stream->id);
send_headers:
headers = NULL;
status = h2_stream_out_prepare(stream, &len, &eos, &headers);
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, session->c,
"h2_stream(%ld-%d): prepared len=%ld, eos=%d",
session->id, stream->id, (long)len, eos);
if (headers) {
status = on_stream_headers(session, stream, headers, len, eos);
if (status != APR_SUCCESS || stream->rst_error) {
return status;
}
goto send_headers;
}
else if (status != APR_EAGAIN) {
if (!stream->has_response) {
int err = H2_STREAM_RST(stream, H2_ERR_PROTOCOL_ERROR);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03466)
"h2_stream(%ld-%d): no response, RST_STREAM, err=%d",
session->id, stream->id, err);
nghttp2_submit_rst_stream(session->ngh2, NGHTTP2_FLAG_NONE,
stream->id, err);
return APR_SUCCESS;
}
rv = nghttp2_session_resume_data(session->ngh2, stream->id);
session->have_written = 1;
ap_log_cerror(APLOG_MARK, nghttp2_is_fatal(rv)?
APLOG_ERR : APLOG_DEBUG, 0, session->c,
APLOGNO(02936)
"h2_stream(%ld-%d): resuming %s",
session->id, stream->id, rv? nghttp2_strerror(rv) : "");
}
return status;
}
static apr_status_t h2_session_receive(void *ctx, const char *data,
apr_size_t len, apr_size_t *readlen)
{
h2_session *session = ctx;
ssize_t n;
if (len > 0) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session(%ld): feeding %ld bytes to nghttp2",
session->id, (long)len);
n = nghttp2_session_mem_recv(session->ngh2, (const uint8_t *)data, len);
if (n < 0) {
if (nghttp2_is_fatal((int)n)) {
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, (int)n, nghttp2_strerror((int)n));
return APR_EGENERAL;
}
}
else {
*readlen = n;
session->io.bytes_read += n;
}
}
return APR_SUCCESS;
}
static apr_status_t h2_session_read(h2_session *session, int block)
{
apr_status_t status, rstatus = APR_EAGAIN;
conn_rec *c = session->c;
apr_off_t read_start = session->io.bytes_read;
while (1) {
/* H2_IN filter handles all incoming data against the session.
* We just pull at the filter chain to make it happen */
status = ap_get_brigade(c->input_filters,
session->bbtmp, AP_MODE_READBYTES,
block? APR_BLOCK_READ : APR_NONBLOCK_READ,
APR_BUCKET_BUFF_SIZE);
/* get rid of any possible data we do not expect to get */
apr_brigade_cleanup(session->bbtmp);
switch (status) {
case APR_SUCCESS:
/* successful read, reset our idle timers */
rstatus = APR_SUCCESS;
if (block) {
/* successful blocked read, try unblocked to
* get more. */
block = 0;
}
break;
case APR_EAGAIN:
return rstatus;
case APR_TIMEUP:
return status;
default:
if (session->io.bytes_read == read_start) {
/* first attempt failed */
if (APR_STATUS_IS_ETIMEDOUT(status)
|| APR_STATUS_IS_ECONNABORTED(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_EBADF(status)) {
/* common status for a client that has left */
ap_log_cerror( APLOG_MARK, APLOG_TRACE1, status, c,
"h2_session(%ld): input gone", session->id);
}
else {
/* uncommon status, log on DEBUG so that we see this */
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
APLOGNO(02950)
"h2_session(%ld): error reading, terminating",
session->id);
}
return status;
}
/* subsequent failure after success(es), return initial
* status. */
return rstatus;
}
if ((session->io.bytes_read - read_start) > (64*1024)) {
/* read enough in one go, give write a chance */
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, status, c,
"h2_session(%ld): read 64k, returning", session->id);
break;
}
}
return rstatus;
}
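/* Human readable names for the session states. Note: the order of this
* array must match the h2_session_state enum; state_name() falls back
* to "unknown" for out-of-range values. */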
static const char *StateNames[] = {
"INIT", /* H2_SESSION_ST_INIT */
"DONE", /* H2_SESSION_ST_DONE */
"IDLE", /* H2_SESSION_ST_IDLE */
"BUSY", /* H2_SESSION_ST_BUSY */
"WAIT", /* H2_SESSION_ST_WAIT */
};
static const char *state_name(h2_session_state state)
{
if (state >= (sizeof(StateNames)/sizeof(StateNames[0]))) {
return "unknown";
}
return StateNames[state];
}
static void update_child_status(h2_session *session, int status, const char *msg)
{
/* Assume that code/msg only change when something really happened,
* and avoid redundant scoreboard updates in between */
if (session->last_status_code != status
|| session->last_status_msg != msg) {
apr_snprintf(session->status, sizeof(session->status),
"%s, streams: %d/%d/%d/%d/%d (open/recv/resp/push/rst)",
msg? msg : "-",
(int)session->open_streams,
(int)session->remote.emitted_count,
(int)session->responses_submitted,
(int)session->pushes_submitted,
(int)session->pushes_reset + session->streams_reset);
ap_update_child_status_descr(session->c->sbh, status, session->status);
}
}
static void transit(h2_session *session, const char *action, h2_session_state nstate)
{
if (session->state != nstate) {
int loglvl = APLOG_DEBUG;
if ((session->state == H2_SESSION_ST_BUSY && nstate == H2_SESSION_ST_WAIT)
|| (session->state == H2_SESSION_ST_WAIT && nstate == H2_SESSION_ST_BUSY)){
loglvl = APLOG_TRACE1;
}
ap_log_cerror(APLOG_MARK, loglvl, 0, session->c, APLOGNO(03078)
"h2_session(%ld): transit [%s] -- %s --> [%s]", session->id,
state_name(session->state), action, state_name(nstate));
session->state = nstate;
switch (session->state) {
case H2_SESSION_ST_IDLE:
update_child_status(session, (session->open_streams == 0?
SERVER_BUSY_KEEPALIVE
: SERVER_BUSY_READ), "idle");
break;
case H2_SESSION_ST_DONE:
update_child_status(session, SERVER_CLOSING, "done");
break;
default:
/* nop */
break;
}
}
}
static void h2_session_ev_init(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_INIT:
transit(session, "init", H2_SESSION_ST_BUSY);
break;
default:
/* nop */
break;
}
}
static void h2_session_ev_local_goaway(h2_session *session, int arg, const char *msg)
{
cleanup_streams(session);
if (!session->remote.shutdown) {
update_child_status(session, SERVER_CLOSING, "local goaway");
}
transit(session, "local goaway", H2_SESSION_ST_DONE);
}
static void h2_session_ev_remote_goaway(h2_session *session, int arg, const char *msg)
{
if (!session->remote.shutdown) {
session->remote.error = arg;
session->remote.accepting = 0;
session->remote.shutdown = 1;
cleanup_streams(session);
update_child_status(session, SERVER_CLOSING, "remote goaway");
transit(session, "remote goaway", H2_SESSION_ST_DONE);
}
}
static void h2_session_ev_conn_error(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_INIT:
case H2_SESSION_ST_DONE:
/* just leave */
transit(session, "conn error", H2_SESSION_ST_DONE);
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03401)
"h2_session(%ld): conn error -> shutdown", session->id);
h2_session_shutdown(session, arg, msg, 0);
break;
}
}
static void h2_session_ev_proto_error(h2_session *session, int arg, const char *msg)
{
if (!session->local.shutdown) {
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, session->c, APLOGNO(03402)
"h2_session(%ld): proto error -> shutdown", session->id);
h2_session_shutdown(session, arg, msg, 0);
}
}
static void h2_session_ev_conn_timeout(h2_session *session, int arg, const char *msg)
{
transit(session, msg, H2_SESSION_ST_DONE);
if (!session->local.shutdown) {
h2_session_shutdown(session, arg, msg, 1);
}
}
static void h2_session_ev_no_io(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_BUSY:
/* Nothing to READ, nothing to WRITE on the master connection.
* Possible causes:
* - we wait for the client to send us something
* - we wait for started tasks to produce output
* - we have finished all streams and the client has sent GO_AWAY
*/
ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, session->c,
"h2_session(%ld): NO_IO event, %d streams open",
session->id, session->open_streams);
h2_conn_io_flush(&session->io);
if (session->open_streams > 0) {
if (h2_mplx_awaits_data(session->mplx)) {
/* waiting for at least one stream to produce data */
transit(session, "no io", H2_SESSION_ST_WAIT);
}
else {
/* we have streams open, and all are submitted and none
* is suspended. The only thing keeping us from WRITEing
* more must be the flow control.
* This means we only wait for WINDOW_UPDATE from the
* client and can block on READ. */
transit(session, "no io (flow wait)", H2_SESSION_ST_IDLE);
session->idle_until = apr_time_now() + session->s->timeout;
session->keep_sync_until = session->idle_until;
/* Make sure we have flushed all previously written output
* so that the client will react. */
if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
return;
}
}
}
else if (session->local.accepting) {
/* When we have no streams, but accept new ones, switch to idle */
apr_time_t now = apr_time_now();
transit(session, "no io (keepalive)", H2_SESSION_ST_IDLE);
session->idle_until = (session->remote.emitted_count?
session->s->keep_alive_timeout :
session->s->timeout) + now;
session->keep_sync_until = now + apr_time_from_sec(1);
}
else {
/* We are no longer accepting new streams and there are
* none left. Time to leave. */
h2_session_shutdown(session, arg, msg, 0);
transit(session, "no io", H2_SESSION_ST_DONE);
}
break;
default:
/* nop */
break;
}
}
static void h2_session_ev_stream_ready(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_WAIT:
transit(session, "stream ready", H2_SESSION_ST_BUSY);
break;
default:
/* nop */
break;
}
}
static void h2_session_ev_data_read(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_IDLE:
case H2_SESSION_ST_WAIT:
transit(session, "data read", H2_SESSION_ST_BUSY);
break;
default:
/* nop */
break;
}
}
static void h2_session_ev_ngh2_done(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_DONE:
/* nop */
break;
default:
transit(session, "nghttp2 done", H2_SESSION_ST_DONE);
break;
}
}
static void h2_session_ev_mpm_stopping(h2_session *session, int arg, const char *msg)
{
switch (session->state) {
case H2_SESSION_ST_DONE:
/* nop */
break;
default:
h2_session_shutdown_notice(session);
break;
}
}
static void h2_session_ev_pre_close(h2_session *session, int arg, const char *msg)
{
h2_session_shutdown(session, arg, msg, 1);
}
static void h2_session_ev_stream_open(h2_session *session, int arg, const char *msg)
{
++session->open_streams;
switch (session->state) {
case H2_SESSION_ST_IDLE:
if (session->open_streams == 1) {
/* enter timeout, since we have a stream again */
session->idle_until = (session->s->timeout + apr_time_now());
}
break;
default:
break;
}
}
static void h2_session_ev_stream_done(h2_session *session, int arg, const char *msg)
{
--session->open_streams;
switch (session->state) {
case H2_SESSION_ST_IDLE:
if (session->open_streams == 0) {
/* enter keepalive timeout, since we no longer have streams */
session->idle_until = (session->s->keep_alive_timeout
+ apr_time_now());
}
break;
default:
break;
}
}
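/* Central dispatcher of the session state machine: each event maps to
* one handler above. Whatever the event, a session that has reached
* H2_SESSION_ST_DONE afterwards gets its temporary brigade cleaned and
* its multiplexer aborted. */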
static void dispatch_event(h2_session *session, h2_session_event_t ev,
int arg, const char *msg)
{
switch (ev) {
case H2_SESSION_EV_INIT:
h2_session_ev_init(session, arg, msg);
break;
case H2_SESSION_EV_LOCAL_GOAWAY:
h2_session_ev_local_goaway(session, arg, msg);
break;
case H2_SESSION_EV_REMOTE_GOAWAY:
h2_session_ev_remote_goaway(session, arg, msg);
break;
case H2_SESSION_EV_CONN_ERROR:
h2_session_ev_conn_error(session, arg, msg);
break;
case H2_SESSION_EV_PROTO_ERROR:
h2_session_ev_proto_error(session, arg, msg);
break;
case H2_SESSION_EV_CONN_TIMEOUT:
h2_session_ev_conn_timeout(session, arg, msg);
break;
case H2_SESSION_EV_NO_IO:
h2_session_ev_no_io(session, arg, msg);
break;
case H2_SESSION_EV_STREAM_READY:
h2_session_ev_stream_ready(session, arg, msg);
break;
case H2_SESSION_EV_DATA_READ:
h2_session_ev_data_read(session, arg, msg);
break;
case H2_SESSION_EV_NGH2_DONE:
h2_session_ev_ngh2_done(session, arg, msg);
break;
case H2_SESSION_EV_MPM_STOPPING:
h2_session_ev_mpm_stopping(session, arg, msg);
break;
case H2_SESSION_EV_PRE_CLOSE:
h2_session_ev_pre_close(session, arg, msg);
break;
case H2_SESSION_EV_STREAM_OPEN:
h2_session_ev_stream_open(session, arg, msg);
break;
case H2_SESSION_EV_STREAM_DONE:
h2_session_ev_stream_done(session, arg, msg);
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_session(%ld): unknown event %d",
session->id, ev);
break;
}
if (session->state == H2_SESSION_ST_DONE) {
apr_brigade_cleanup(session->bbtmp);
h2_mplx_abort(session->mplx);
}
}
static const int MAX_WAIT_MICROS = 200 * 1000;
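/* Main processing loop of a session: drives the state machine until the
* session reaches H2_SESSION_ST_DONE, reading and writing as indicated
* by nghttp2_session_want_read()/want_write(). In the WAIT state, the
* wait time backs off exponentially up to MAX_WAIT_MICROS (200 ms). */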
apr_status_t h2_session_process(h2_session *session, int async)
{
apr_status_t status = APR_SUCCESS;
conn_rec *c = session->c;
int rv, mpm_state, trace = APLOGctrace3(c);
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): process start, async=%d",
session->id, async);
}
if (c->cs) {
c->cs->state = CONN_STATE_WRITE_COMPLETION;
}
while (session->state != H2_SESSION_ST_DONE) {
trace = APLOGctrace3(c);
session->have_read = session->have_written = 0;
if (session->local.accepting
&& !ap_mpm_query(AP_MPMQ_MPM_STATE, &mpm_state)) {
if (mpm_state == AP_MPMQ_STOPPING) {
dispatch_event(session, H2_SESSION_EV_MPM_STOPPING, 0, NULL);
}
}
session->status[0] = '\0';
switch (session->state) {
case H2_SESSION_ST_INIT:
ap_update_child_status_from_conn(c->sbh, SERVER_BUSY_READ, c);
if (!h2_is_acceptable_connection(c, 1)) {
update_child_status(session, SERVER_BUSY_READ, "inadequate security");
h2_session_shutdown(session, NGHTTP2_INADEQUATE_SECURITY, NULL, 1);
}
else {
update_child_status(session, SERVER_BUSY_READ, "init");
status = h2_session_start(session, &rv);
ap_log_cerror(APLOG_MARK, APLOG_DEBUG, status, c, APLOGNO(03079)
"h2_session(%ld): started on %s:%d", session->id,
session->s->server_hostname,
c->local_addr->port);
if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
dispatch_event(session, H2_SESSION_EV_INIT, 0, NULL);
}
break;
case H2_SESSION_ST_IDLE:
/* make certain we send everything before we idle */
h2_conn_io_flush(&session->io);
if (!session->keep_sync_until && async && !session->open_streams
&& !session->r && session->remote.emitted_count) {
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): async idle, nonblock read, "
"%d streams open", session->id,
session->open_streams);
}
/* We do not return to the async mpm immediately, since under
* load, mpms show the tendency to throw keep_alive connections
* away very rapidly.
* So, if we are still processing streams, we wait for the
* normal timeout first and, on timeout, close.
* If we have no streams, we still wait a short amount of
* time here for the next frame to arrive, before handing
* it to keep_alive processing of the mpm.
*/
status = h2_session_read(session, 0);
if (status == APR_SUCCESS) {
session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (APR_STATUS_IS_EAGAIN(status) || APR_STATUS_IS_TIMEUP(status)) {
if (apr_time_now() > session->idle_until) {
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
}
else {
status = APR_EAGAIN;
goto out;
}
}
else {
ap_log_cerror( APLOG_MARK, APLOG_DEBUG, status, c,
APLOGNO(03403)
"h2_session(%ld): idle, no data, error",
session->id);
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "timeout");
}
}
else {
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): sync idle, stutter 1-sec, "
"%d streams open", session->id,
session->open_streams);
}
/* We wait in smaller increments, using a 1 second timeout.
* That gives us the chance to check for MPMQ_STOPPING often.
*/
status = h2_mplx_idle(session->mplx);
if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
H2_ERR_ENHANCE_YOUR_CALM, "less is more");
}
h2_filter_cin_timeout_set(session->cin, apr_time_from_sec(1));
status = h2_session_read(session, 1);
if (status == APR_SUCCESS) {
session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (status == APR_EAGAIN) {
/* nothing to read */
}
else if (APR_STATUS_IS_TIMEUP(status)) {
apr_time_t now = apr_time_now();
if (now > session->keep_sync_until) {
/* if we are on an async mpm, now is the time that
* we may dare to pass control to it. */
session->keep_sync_until = 0;
}
if (now > session->idle_until) {
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): keepalive timeout",
session->id);
}
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, "timeout");
}
else if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): keepalive, %f sec left",
session->id, (session->idle_until - now) / 1000000.0f);
}
/* continue with read handling */
}
else if (APR_STATUS_IS_ECONNABORTED(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_EBADF(status)) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): input gone", session->id);
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
else {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): idle(1 sec timeout) "
"read failed", session->id);
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, "error");
}
}
break;
case H2_SESSION_ST_BUSY:
if (nghttp2_session_want_read(session->ngh2)) {
ap_update_child_status(session->c->sbh, SERVER_BUSY_READ, NULL);
h2_filter_cin_timeout_set(session->cin, session->s->timeout);
status = h2_session_read(session, 0);
if (status == APR_SUCCESS) {
session->have_read = 1;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (status == APR_EAGAIN) {
/* nothing to read */
}
else if (APR_STATUS_IS_TIMEUP(status)) {
dispatch_event(session, H2_SESSION_EV_CONN_TIMEOUT, 0, NULL);
break;
}
else {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
}
/* trigger window updates, stream resumes and submits */
status = h2_mplx_dispatch_master_events(session->mplx,
on_stream_resume,
session);
if (status != APR_SUCCESS) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): dispatch error",
session->id);
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
H2_ERR_INTERNAL_ERROR,
"dispatch error");
break;
}
if (nghttp2_session_want_write(session->ngh2)) {
ap_update_child_status(session->c->sbh, SERVER_BUSY_WRITE, NULL);
status = h2_session_send(session);
if (status != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR,
H2_ERR_INTERNAL_ERROR, "writing");
break;
}
}
if (session->have_read || session->have_written) {
if (session->wait_us) {
session->wait_us = 0;
}
}
else if (!nghttp2_session_want_write(session->ngh2)) {
dispatch_event(session, H2_SESSION_EV_NO_IO, 0, NULL);
}
break;
case H2_SESSION_ST_WAIT:
if (session->wait_us <= 0) {
session->wait_us = 10;
if (h2_conn_io_flush(&session->io) != APR_SUCCESS) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
break;
}
}
else {
/* repeating, increase timer for graceful backoff */
session->wait_us = H2MIN(session->wait_us*2, MAX_WAIT_MICROS);
}
if (trace) {
ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
"h2_session: wait for data, %ld micros",
(long)session->wait_us);
}
status = h2_mplx_out_trywait(session->mplx, session->wait_us,
session->iowait);
if (status == APR_SUCCESS) {
session->wait_us = 0;
dispatch_event(session, H2_SESSION_EV_DATA_READ, 0, NULL);
}
else if (APR_STATUS_IS_TIMEUP(status)) {
/* go back to checking all inputs again */
transit(session, "wait cycle", session->local.accepting?
H2_SESSION_ST_BUSY : H2_SESSION_ST_DONE);
}
else if (APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status)) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
else {
ap_log_cerror(APLOG_MARK, APLOG_WARNING, status, c,
APLOGNO(03404)
"h2_session(%ld): waiting on conditional",
session->id);
h2_session_shutdown(session, H2_ERR_INTERNAL_ERROR,
"cond wait error", 0);
}
break;
default:
ap_log_cerror(APLOG_MARK, APLOG_ERR, APR_EGENERAL, c,
APLOGNO(03080)
"h2_session(%ld): unknown state %d", session->id, session->state);
dispatch_event(session, H2_SESSION_EV_PROTO_ERROR, 0, NULL);
break;
}
if (!nghttp2_session_want_read(session->ngh2)
&& !nghttp2_session_want_write(session->ngh2)) {
dispatch_event(session, H2_SESSION_EV_NGH2_DONE, 0, NULL);
}
if (session->reprioritize) {
h2_mplx_reprioritize(session->mplx, stream_pri_cmp, session);
session->reprioritize = 0;
}
}
out:
if (trace) {
ap_log_cerror( APLOG_MARK, APLOG_TRACE3, status, c,
"h2_session(%ld): [%s] process returns",
session->id, state_name(session->state));
}
if ((session->state != H2_SESSION_ST_DONE)
&& (APR_STATUS_IS_EOF(status)
|| APR_STATUS_IS_ECONNRESET(status)
|| APR_STATUS_IS_ECONNABORTED(status))) {
dispatch_event(session, H2_SESSION_EV_CONN_ERROR, 0, NULL);
}
status = APR_SUCCESS;
if (session->state == H2_SESSION_ST_DONE) {
status = APR_EOF;
if (!session->eoc_written) {
session->eoc_written = 1;
h2_conn_io_write_eoc(&session->io, session);
}
}
return status;
}
apr_status_t h2_session_pre_close(h2_session *session, int async)
{
ap_log_cerror(APLOG_MARK, APLOG_TRACE1, 0, session->c,
"h2_session(%ld): pre_close", session->id);
dispatch_event(session, H2_SESSION_EV_PRE_CLOSE, 0,
(session->state == H2_SESSION_ST_IDLE)? "timeout" : NULL);
return APR_SUCCESS;
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_5366_1 |
crossvul-cpp_data_good_1713_0 | /* Copyright (C) 2009 Red Hat, Inc.
* Copyright (C) 2006 Rusty Russell IBM Corporation
*
* Author: Michael S. Tsirkin <mst@redhat.com>
*
* Inspiration, some code, and most witty comments come from
* Documentation/virtual/lguest/lguest.c, by Rusty Russell
*
* This work is licensed under the terms of the GNU GPL, version 2.
*
* Generic code for virtio server in host kernel.
*/
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "vhost.h"
static ushort max_mem_regions = 64;
module_param(max_mem_regions, ushort, 0444);
MODULE_PARM_DESC(max_mem_regions,
"Maximum number of memory regions in memory map. (default: 64)");
enum {
VHOST_MEMORY_F_LOG = 0x1,
};
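/* With VIRTIO_RING_F_EVENT_IDX, the guest places used_event right after
* the avail ring and the host places avail_event right after the used
* ring, which is why these macros index one element past a ring of size
* vq->num. */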
#define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num])
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
{
vq->user_be = !virtio_legacy_is_little_endian();
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
struct vhost_vring_state s;
if (vq->private_data)
return -EBUSY;
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
if (s.num != VHOST_VRING_LITTLE_ENDIAN &&
s.num != VHOST_VRING_BIG_ENDIAN)
return -EINVAL;
vq->user_be = s.num;
return 0;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{
struct vhost_vring_state s = {
.index = idx,
.num = vq->user_be
};
if (copy_to_user(argp, &s, sizeof(s)))
return -EFAULT;
return 0;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
/* Note for legacy virtio: user_be is initialized at reset time
* according to the host endianness. If userspace does not set an
* explicit endianness, the default behavior is native endian, as
* expected by legacy virtio.
*/
vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1) || !vq->user_be;
}
#else
static void vhost_vq_reset_user_be(struct vhost_virtqueue *vq)
{
}
static long vhost_set_vring_endian(struct vhost_virtqueue *vq, int __user *argp)
{
return -ENOIOCTLCMD;
}
static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
int __user *argp)
{
return -ENOIOCTLCMD;
}
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
vq->is_le = true;
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
poll_table *pt)
{
struct vhost_poll *poll;
poll = container_of(pt, struct vhost_poll, table);
poll->wqh = wqh;
add_wait_queue(wqh, &poll->wait);
}
static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
void *key)
{
struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
if (!((unsigned long)key & poll->mask))
return 0;
vhost_poll_queue(poll);
return 0;
}
void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
INIT_LIST_HEAD(&work->node);
work->fn = fn;
init_waitqueue_head(&work->done);
work->flushing = 0;
work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);
/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
unsigned long mask, struct vhost_dev *dev)
{
init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
init_poll_funcptr(&poll->table, vhost_poll_func);
poll->mask = mask;
poll->dev = dev;
poll->wqh = NULL;
vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);
/* Start polling a file. We add ourselves to file's wait queue. The caller must
* keep a reference to a file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
unsigned long mask;
int ret = 0;
if (poll->wqh)
return 0;
mask = file->f_op->poll(file, &poll->table);
if (mask)
vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
if (mask & POLLERR) {
if (poll->wqh)
remove_wait_queue(poll->wqh, &poll->wait);
ret = -EINVAL;
}
return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);
/* Stop polling a file. After this function returns, it becomes safe to drop the
* file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
if (poll->wqh) {
remove_wait_queue(poll->wqh, &poll->wait);
poll->wqh = NULL;
}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);
static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
unsigned seq)
{
int left;
spin_lock_irq(&dev->work_lock);
left = seq - work->done_seq;
spin_unlock_irq(&dev->work_lock);
return left <= 0;
}
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned seq;
int flushing;
spin_lock_irq(&dev->work_lock);
seq = work->queue_seq;
work->flushing++;
spin_unlock_irq(&dev->work_lock);
wait_event(work->done, vhost_work_seq_done(dev, work, seq));
spin_lock_irq(&dev->work_lock);
flushing = --work->flushing;
spin_unlock_irq(&dev->work_lock);
BUG_ON(flushing < 0);
}
EXPORT_SYMBOL_GPL(vhost_work_flush);
/* Flush any work that has been scheduled. When calling this, don't hold any
* locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
unsigned long flags;
spin_lock_irqsave(&dev->work_lock, flags);
if (list_empty(&work->node)) {
list_add_tail(&work->node, &dev->work_list);
work->queue_seq++;
spin_unlock_irqrestore(&dev->work_lock, flags);
wake_up_process(dev->worker);
} else {
spin_unlock_irqrestore(&dev->work_lock, flags);
}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);
void vhost_poll_queue(struct vhost_poll *poll)
{
vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);
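/* Illustrative sketch, not part of the original driver: how a backend might
 * drive the poll/work API above. The handler and attach function names are
 * hypothetical; real users such as vhost-net wire one vhost_poll up per
 * virtqueue. Note the contract documented above: keep the file reference
 * until after vhost_poll_stop(), and flush afterwards. */
#if 0	/* example only */
static void example_handle_kick(struct vhost_work *work)
{
	/* runs on the vhost worker thread */
}

static int example_backend_attach(struct vhost_dev *dev,
				  struct vhost_poll *poll, struct file *f)
{
	int r;

	vhost_poll_init(poll, example_handle_kick, POLLIN, dev);
	r = vhost_poll_start(poll, f);	/* registers on f's wait queue */
	if (r)
		return r;
	/* ... later, on teardown: */
	vhost_poll_stop(poll);		/* then f may be released */
	vhost_poll_flush(poll);		/* wait out any in-flight handler */
	return 0;
}
#endif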
static void vhost_vq_reset(struct vhost_dev *dev,
struct vhost_virtqueue *vq)
{
vq->num = 1;
vq->desc = NULL;
vq->avail = NULL;
vq->used = NULL;
vq->last_avail_idx = 0;
vq->avail_idx = 0;
vq->last_used_idx = 0;
vq->signalled_used = 0;
vq->signalled_used_valid = false;
vq->used_flags = 0;
vq->log_used = false;
vq->log_addr = -1ull;
vq->private_data = NULL;
vq->acked_features = 0;
vq->log_base = NULL;
vq->error_ctx = NULL;
vq->error = NULL;
vq->kick = NULL;
vq->call_ctx = NULL;
vq->call = NULL;
vq->log_ctx = NULL;
vq->memory = NULL;
vq->is_le = virtio_legacy_is_little_endian();
vhost_vq_reset_user_be(vq);
}
static int vhost_worker(void *data)
{
struct vhost_dev *dev = data;
struct vhost_work *work = NULL;
unsigned uninitialized_var(seq);
mm_segment_t oldfs = get_fs();
set_fs(USER_DS);
use_mm(dev->mm);
for (;;) {
/* mb paired w/ kthread_stop */
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irq(&dev->work_lock);
if (work) {
work->done_seq = seq;
if (work->flushing)
wake_up_all(&work->done);
}
if (kthread_should_stop()) {
spin_unlock_irq(&dev->work_lock);
__set_current_state(TASK_RUNNING);
break;
}
if (!list_empty(&dev->work_list)) {
work = list_first_entry(&dev->work_list,
struct vhost_work, node);
list_del_init(&work->node);
seq = work->queue_seq;
} else
work = NULL;
spin_unlock_irq(&dev->work_lock);
if (work) {
__set_current_state(TASK_RUNNING);
work->fn(work);
if (need_resched())
schedule();
} else
schedule();
}
unuse_mm(dev->mm);
set_fs(oldfs);
return 0;
}
static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
kfree(vq->indirect);
vq->indirect = NULL;
kfree(vq->log);
vq->log = NULL;
kfree(vq->heads);
vq->heads = NULL;
}
/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
struct vhost_virtqueue *vq;
int i;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
GFP_KERNEL);
vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
if (!vq->indirect || !vq->log || !vq->heads)
goto err_nomem;
}
return 0;
err_nomem:
for (; i >= 0; --i)
vhost_vq_free_iovecs(dev->vqs[i]);
return -ENOMEM;
}
static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i)
vhost_vq_free_iovecs(dev->vqs[i]);
}
void vhost_dev_init(struct vhost_dev *dev,
struct vhost_virtqueue **vqs, int nvqs)
{
struct vhost_virtqueue *vq;
int i;
dev->vqs = vqs;
dev->nvqs = nvqs;
mutex_init(&dev->mutex);
dev->log_ctx = NULL;
dev->log_file = NULL;
dev->memory = NULL;
dev->mm = NULL;
spin_lock_init(&dev->work_lock);
INIT_LIST_HEAD(&dev->work_list);
dev->worker = NULL;
for (i = 0; i < dev->nvqs; ++i) {
vq = dev->vqs[i];
vq->log = NULL;
vq->indirect = NULL;
vq->heads = NULL;
vq->dev = dev;
mutex_init(&vq->mutex);
vhost_vq_reset(dev, vq);
if (vq->handle_kick)
vhost_poll_init(&vq->poll, vq->handle_kick,
POLLIN, dev);
}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
/* Are you the owner? If not, I don't think you mean to do that */
return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);
struct vhost_attach_cgroups_struct {
struct vhost_work work;
struct task_struct *owner;
int ret;
};
static void vhost_attach_cgroups_work(struct vhost_work *work)
{
struct vhost_attach_cgroups_struct *s;
s = container_of(work, struct vhost_attach_cgroups_struct, work);
s->ret = cgroup_attach_task_all(s->owner, current);
}
static int vhost_attach_cgroups(struct vhost_dev *dev)
{
struct vhost_attach_cgroups_struct attach;
attach.owner = current;
vhost_work_init(&attach.work, vhost_attach_cgroups_work);
vhost_work_queue(dev, &attach.work);
vhost_work_flush(dev, &attach.work);
return attach.ret;
}
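/* Note (added): vhost_attach_cgroups() is also the canonical synchronous use
 * of the work API above -- a vhost_work on the stack is queued to the worker
 * and vhost_work_flush() waits for it to run before the frame goes away. */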
/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);
/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
struct task_struct *worker;
int err;
/* Is there an owner already? */
if (vhost_dev_has_owner(dev)) {
err = -EBUSY;
goto err_mm;
}
/* No owner, become one */
dev->mm = get_task_mm(current);
worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
if (IS_ERR(worker)) {
err = PTR_ERR(worker);
goto err_worker;
}
dev->worker = worker;
wake_up_process(worker); /* avoid contributing to loadavg */
err = vhost_attach_cgroups(dev);
if (err)
goto err_cgroup;
err = vhost_dev_alloc_iovecs(dev);
if (err)
goto err_cgroup;
return 0;
err_cgroup:
kthread_stop(worker);
dev->worker = NULL;
err_worker:
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
err_mm:
return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);
struct vhost_memory *vhost_dev_reset_owner_prepare(void)
{
return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);
/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
{
int i;
vhost_dev_cleanup(dev, true);
/* Restore memory to default empty mapping. */
memory->nregions = 0;
dev->memory = memory;
/* We don't need VQ locks below since vhost_dev_cleanup makes sure
* VQs aren't running.
*/
for (i = 0; i < dev->nvqs; ++i)
dev->vqs[i]->memory = memory;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);
void vhost_dev_stop(struct vhost_dev *dev)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
vhost_poll_stop(&dev->vqs[i]->poll);
vhost_poll_flush(&dev->vqs[i]->poll);
}
}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);
/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
int i;
for (i = 0; i < dev->nvqs; ++i) {
if (dev->vqs[i]->error_ctx)
eventfd_ctx_put(dev->vqs[i]->error_ctx);
if (dev->vqs[i]->error)
fput(dev->vqs[i]->error);
if (dev->vqs[i]->kick)
fput(dev->vqs[i]->kick);
if (dev->vqs[i]->call_ctx)
eventfd_ctx_put(dev->vqs[i]->call_ctx);
if (dev->vqs[i]->call)
fput(dev->vqs[i]->call);
vhost_vq_reset(dev, dev->vqs[i]);
}
vhost_dev_free_iovecs(dev);
if (dev->log_ctx)
eventfd_ctx_put(dev->log_ctx);
dev->log_ctx = NULL;
if (dev->log_file)
fput(dev->log_file);
dev->log_file = NULL;
/* No one will access memory at this point */
kvfree(dev->memory);
dev->memory = NULL;
WARN_ON(!list_empty(&dev->work_list));
if (dev->worker) {
kthread_stop(dev->worker);
dev->worker = NULL;
}
if (dev->mm)
mmput(dev->mm);
dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
u64 a = addr / VHOST_PAGE_SIZE / 8;
/* Make sure 64 bit math will not overflow. */
if (a > ULONG_MAX - (unsigned long)log_base ||
a + (unsigned long)log_base > ULONG_MAX)
return 0;
return access_ok(VERIFY_WRITE, log_base + a,
(sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}
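/* Worked example (added for illustration): with VHOST_PAGE_SIZE == 4096, a
 * write touching guest physical address 0x12345678 dirties page 0x12345,
 * i.e. bit (0x12345 & 7) == 5 of byte log_base + 0x12345/8 == log_base +
 * 0x2468 -- exactly the one-bit-per-page layout the division above encodes. */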
/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
int log_all)
{
int i;
if (!mem)
return 0;
for (i = 0; i < mem->nregions; ++i) {
struct vhost_memory_region *m = mem->regions + i;
unsigned long a = m->userspace_addr;
if (m->memory_size > ULONG_MAX)
return 0;
else if (!access_ok(VERIFY_WRITE, (void __user *)a,
m->memory_size))
return 0;
else if (log_all && !log_access_ok(log_base,
m->guest_phys_addr,
m->memory_size))
return 0;
}
return 1;
}
/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
int log_all)
{
int i;
for (i = 0; i < d->nvqs; ++i) {
int ok;
bool log;
mutex_lock(&d->vqs[i]->mutex);
log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
/* If ring is inactive, will check when it's enabled. */
if (d->vqs[i]->private_data)
ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
else
ok = 1;
mutex_unlock(&d->vqs[i]->mutex);
if (!ok)
return 0;
}
return 1;
}
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
struct vring_desc __user *desc,
struct vring_avail __user *avail,
struct vring_used __user *used)
{
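	/* With VIRTIO_RING_F_EVENT_IDX each ring carries one extra __virtio16
	 * (used_event after the avail ring, avail_event after the used ring),
	 * hence the extra two bytes accounted for below. */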
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
access_ok(VERIFY_READ, avail,
sizeof *avail + num * sizeof *avail->ring + s) &&
access_ok(VERIFY_WRITE, used,
sizeof *used + num * sizeof *used->ring + s);
}
/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
return memory_access_ok(dev, dev->memory, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);
/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
void __user *log_base)
{
size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
return vq_memory_access_ok(log_base, vq->memory,
vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
(!vq->log_used || log_access_ok(log_base, vq->log_addr,
sizeof *vq->used +
vq->num * sizeof *vq->used->ring + s));
}
/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
{
const struct vhost_memory_region *r1 = p1, *r2 = p2;
if (r1->guest_phys_addr < r2->guest_phys_addr)
return 1;
if (r1->guest_phys_addr > r2->guest_phys_addr)
return -1;
return 0;
}
static void *vhost_kvzalloc(unsigned long size)
{
void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
if (!n) {
n = vzalloc(size);
if (!n)
return ERR_PTR(-ENOMEM);
}
return n;
}
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
struct vhost_memory mem, *newmem, *oldmem;
unsigned long size = offsetof(struct vhost_memory, regions);
int i;
if (copy_from_user(&mem, m, size))
return -EFAULT;
if (mem.padding)
return -EOPNOTSUPP;
if (mem.nregions > max_mem_regions)
return -E2BIG;
newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
if (!newmem)
return -ENOMEM;
memcpy(newmem, &mem, size);
if (copy_from_user(newmem->regions, m->regions,
mem.nregions * sizeof *m->regions)) {
kvfree(newmem);
return -EFAULT;
}
sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
vhost_memory_reg_sort_cmp, NULL);
if (!memory_access_ok(d, newmem, 0)) {
kvfree(newmem);
return -EFAULT;
}
oldmem = d->memory;
d->memory = newmem;
/* All memory accesses are done under some VQ mutex. */
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->memory = newmem;
mutex_unlock(&d->vqs[i]->mutex);
}
kvfree(oldmem);
return 0;
}
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL;
bool pollstart = false, pollstop = false;
struct eventfd_ctx *ctx = NULL;
u32 __user *idxp = argp;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
struct vhost_vring_file f;
struct vhost_vring_addr a;
u32 idx;
long r;
r = get_user(idx, idxp);
if (r < 0)
return r;
if (idx >= d->nvqs)
return -ENOBUFS;
vq = d->vqs[idx];
mutex_lock(&vq->mutex);
switch (ioctl) {
case VHOST_SET_VRING_NUM:
/* Resizing ring with an active backend?
* You don't want to do that. */
if (vq->private_data) {
r = -EBUSY;
break;
}
if (copy_from_user(&s, argp, sizeof s)) {
r = -EFAULT;
break;
}
if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
r = -EINVAL;
break;
}
vq->num = s.num;
break;
case VHOST_SET_VRING_BASE:
/* Moving base with an active backend?
* You don't want to do that. */
if (vq->private_data) {
r = -EBUSY;
break;
}
if (copy_from_user(&s, argp, sizeof s)) {
r = -EFAULT;
break;
}
if (s.num > 0xffff) {
r = -EINVAL;
break;
}
vq->last_avail_idx = s.num;
/* Forget the cached index value. */
vq->avail_idx = vq->last_avail_idx;
break;
case VHOST_GET_VRING_BASE:
s.index = idx;
s.num = vq->last_avail_idx;
if (copy_to_user(argp, &s, sizeof s))
r = -EFAULT;
break;
case VHOST_SET_VRING_ADDR:
if (copy_from_user(&a, argp, sizeof a)) {
r = -EFAULT;
break;
}
if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
r = -EOPNOTSUPP;
break;
}
/* For 32bit, verify that the top 32bits of the user
data are set to zero. */
if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
(u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
(u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
r = -EFAULT;
break;
}
/* Make sure it's safe to cast pointers to vring types. */
BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
(a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
(a.log_guest_addr & (sizeof(u64) - 1))) {
r = -EINVAL;
break;
}
/* We only verify access here if backend is configured.
* If it is not, we don't as size might not have been setup.
* We will verify when backend is configured. */
if (vq->private_data) {
if (!vq_access_ok(vq, vq->num,
(void __user *)(unsigned long)a.desc_user_addr,
(void __user *)(unsigned long)a.avail_user_addr,
(void __user *)(unsigned long)a.used_user_addr)) {
r = -EINVAL;
break;
}
/* Also validate log access for used ring if enabled. */
if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
!log_access_ok(vq->log_base, a.log_guest_addr,
sizeof *vq->used +
vq->num * sizeof *vq->used->ring)) {
r = -EINVAL;
break;
}
}
vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
vq->log_addr = a.log_guest_addr;
vq->used = (void __user *)(unsigned long)a.used_user_addr;
break;
case VHOST_SET_VRING_KICK:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != vq->kick) {
pollstop = (filep = vq->kick) != NULL;
pollstart = (vq->kick = eventfp) != NULL;
} else
filep = eventfp;
break;
case VHOST_SET_VRING_CALL:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != vq->call) {
filep = vq->call;
ctx = vq->call_ctx;
vq->call = eventfp;
vq->call_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
} else
filep = eventfp;
break;
case VHOST_SET_VRING_ERR:
if (copy_from_user(&f, argp, sizeof f)) {
r = -EFAULT;
break;
}
eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != vq->error) {
filep = vq->error;
vq->error = eventfp;
ctx = vq->error_ctx;
vq->error_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
} else
filep = eventfp;
break;
case VHOST_SET_VRING_ENDIAN:
r = vhost_set_vring_endian(vq, argp);
break;
case VHOST_GET_VRING_ENDIAN:
r = vhost_get_vring_endian(vq, idx, argp);
break;
default:
r = -ENOIOCTLCMD;
}
if (pollstop && vq->handle_kick)
vhost_poll_stop(&vq->poll);
if (ctx)
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
if (pollstart && vq->handle_kick)
r = vhost_poll_start(&vq->poll, vq->kick);
mutex_unlock(&vq->mutex);
if (pollstop && vq->handle_kick)
vhost_poll_flush(&vq->poll);
return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);
/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
struct file *eventfp, *filep = NULL;
struct eventfd_ctx *ctx = NULL;
u64 p;
long r;
int i, fd;
/* If you are not the owner, you can become one */
if (ioctl == VHOST_SET_OWNER) {
r = vhost_dev_set_owner(d);
goto done;
}
/* You must be the owner to do anything else */
r = vhost_dev_check_owner(d);
if (r)
goto done;
switch (ioctl) {
case VHOST_SET_MEM_TABLE:
r = vhost_set_memory(d, argp);
break;
case VHOST_SET_LOG_BASE:
if (copy_from_user(&p, argp, sizeof p)) {
r = -EFAULT;
break;
}
if ((u64)(unsigned long)p != p) {
r = -EFAULT;
break;
}
for (i = 0; i < d->nvqs; ++i) {
struct vhost_virtqueue *vq;
void __user *base = (void __user *)(unsigned long)p;
vq = d->vqs[i];
mutex_lock(&vq->mutex);
/* If ring is inactive, will check when it's enabled. */
if (vq->private_data && !vq_log_access_ok(vq, base))
r = -EFAULT;
else
vq->log_base = base;
mutex_unlock(&vq->mutex);
}
break;
case VHOST_SET_LOG_FD:
r = get_user(fd, (int __user *)argp);
if (r < 0)
break;
eventfp = fd == -1 ? NULL : eventfd_fget(fd);
if (IS_ERR(eventfp)) {
r = PTR_ERR(eventfp);
break;
}
if (eventfp != d->log_file) {
filep = d->log_file;
d->log_file = eventfp;
ctx = d->log_ctx;
d->log_ctx = eventfp ?
eventfd_ctx_fileget(eventfp) : NULL;
} else
filep = eventfp;
for (i = 0; i < d->nvqs; ++i) {
mutex_lock(&d->vqs[i]->mutex);
d->vqs[i]->log_ctx = d->log_ctx;
mutex_unlock(&d->vqs[i]->mutex);
}
if (ctx)
eventfd_ctx_put(ctx);
if (filep)
fput(filep);
break;
default:
r = -ENOIOCTLCMD;
break;
}
done:
return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
__u64 addr, __u32 len)
{
const struct vhost_memory_region *reg;
int start = 0, end = mem->nregions;
while (start < end) {
int slot = start + (end - start) / 2;
reg = mem->regions + slot;
if (addr >= reg->guest_phys_addr)
end = slot;
else
start = slot + 1;
}
reg = mem->regions + start;
if (addr >= reg->guest_phys_addr &&
reg->guest_phys_addr + reg->memory_size > addr)
return reg;
return NULL;
}
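/* Note (added for illustration): vhost_set_memory() sorts regions by
 * descending guest_phys_addr (vhost_memory_reg_sort_cmp above), so the
 * binary search homes in on the largest region start <= addr. E.g. with
 * region starts {0x30000, 0x20000, 0x10000}, addr 0x25000 selects the
 * 0x20000 region, and the final size check rejects holes between regions. */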
/* TODO: This is really inefficient. We need something like get_user()
* (instruction directly accesses the data, with an exception table entry
* returning -EFAULT). See Documentation/x86/exception-tables.txt.
*/
static int set_bit_to_user(int nr, void __user *addr)
{
unsigned long log = (unsigned long)addr;
struct page *page;
void *base;
int bit = nr + (log % PAGE_SIZE) * 8;
int r;
r = get_user_pages_fast(log, 1, 1, &page);
if (r < 0)
return r;
BUG_ON(r != 1);
base = kmap_atomic(page);
set_bit(bit, base);
kunmap_atomic(base);
set_page_dirty_lock(page);
put_page(page);
return 0;
}
static int log_write(void __user *log_base,
u64 write_address, u64 write_length)
{
u64 write_page = write_address / VHOST_PAGE_SIZE;
int r;
if (!write_length)
return 0;
write_length += write_address % VHOST_PAGE_SIZE;
for (;;) {
u64 base = (u64)(unsigned long)log_base;
u64 log = base + write_page / 8;
int bit = write_page % 8;
if ((u64)(unsigned long)log != log)
return -EFAULT;
r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
if (r < 0)
return r;
if (write_length <= VHOST_PAGE_SIZE)
break;
write_length -= VHOST_PAGE_SIZE;
write_page += 1;
}
return r;
}
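/* Worked example (added for illustration): write_address = 5000 and
 * write_length = 9000 with 4096-byte pages gives write_page = 1 and an
 * adjusted length of 9904, so the loop marks pages 1, 2 and 3 -- bytes
 * 4096..16383, a superset of the written range [5000, 14000). */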
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
unsigned int log_num, u64 len)
{
int i, r;
/* Make sure data written is seen before log. */
smp_wmb();
for (i = 0; i < log_num; ++i) {
u64 l = min(log[i].len, len);
r = log_write(vq->log_base, log[i].addr, l);
if (r < 0)
return r;
len -= l;
if (!len) {
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
return 0;
}
}
/* Length written exceeds what we have stored. This is a bug. */
BUG();
return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
void __user *used;
if (__put_user(cpu_to_vhost16(vq, vq->used_flags), &vq->used->flags) < 0)
return -EFAULT;
if (unlikely(vq->log_used)) {
/* Make sure the flag is seen before log. */
smp_wmb();
/* Log used flag write. */
used = &vq->used->flags;
log_write(vq->log_base, vq->log_addr +
(used - (void __user *)vq->used),
sizeof vq->used->flags);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return 0;
}
static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
if (__put_user(cpu_to_vhost16(vq, vq->avail_idx), vhost_avail_event(vq)))
return -EFAULT;
if (unlikely(vq->log_used)) {
void __user *used;
/* Make sure the event is seen before log. */
smp_wmb();
/* Log avail event write */
used = vhost_avail_event(vq);
log_write(vq->log_base, vq->log_addr +
(used - (void __user *)vq->used),
sizeof *vhost_avail_event(vq));
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return 0;
}
int vhost_init_used(struct vhost_virtqueue *vq)
{
__virtio16 last_used_idx;
int r;
if (!vq->private_data) {
vq->is_le = virtio_legacy_is_little_endian();
return 0;
}
vhost_init_is_le(vq);
r = vhost_update_used_flags(vq);
if (r)
return r;
vq->signalled_used_valid = false;
if (!access_ok(VERIFY_READ, &vq->used->idx, sizeof vq->used->idx))
return -EFAULT;
r = __get_user(last_used_idx, &vq->used->idx);
if (r)
return r;
vq->last_used_idx = vhost16_to_cpu(vq, last_used_idx);
return 0;
}
EXPORT_SYMBOL_GPL(vhost_init_used);
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
struct iovec iov[], int iov_size)
{
const struct vhost_memory_region *reg;
struct vhost_memory *mem;
struct iovec *_iov;
u64 s = 0;
int ret = 0;
mem = vq->memory;
while ((u64)len > s) {
u64 size;
if (unlikely(ret >= iov_size)) {
ret = -ENOBUFS;
break;
}
reg = find_region(mem, addr, len);
if (unlikely(!reg)) {
ret = -EFAULT;
break;
}
_iov = iov + ret;
size = reg->memory_size - addr + reg->guest_phys_addr;
_iov->iov_len = min((u64)len - s, size);
_iov->iov_base = (void __user *)(unsigned long)
(reg->userspace_addr + addr - reg->guest_phys_addr);
s += size;
addr += size;
++ret;
}
return ret;
}
/* Each buffer in the virtqueues is actually a chain of descriptors. This
* function returns the next descriptor in the chain,
* or -1U if we're at the end. */
static unsigned next_desc(struct vhost_virtqueue *vq, struct vring_desc *desc)
{
unsigned int next;
/* If this descriptor says it doesn't chain, we're done. */
if (!(desc->flags & cpu_to_vhost16(vq, VRING_DESC_F_NEXT)))
return -1U;
/* Check they're not leading us off end of descriptors. */
next = vhost16_to_cpu(vq, desc->next);
/* Make sure compiler knows to grab that: we don't want it changing! */
/* We will use the result as an index in an array, so most
* architectures only need a compiler barrier here. */
read_barrier_depends();
return next;
}
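/* Example chain (added for illustration): desc[3] with VRING_DESC_F_NEXT and
 * next = 7, desc[7] with the flag and next = 1, desc[1] without it. Repeated
 * next_desc() calls then yield 7, 1 and -1U, and the found > count checks in
 * the callers bound such walks against malicious loops. */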
static int get_indirect(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num,
struct vring_desc *indirect)
{
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
u32 len = vhost32_to_cpu(vq, indirect->len);
struct iov_iter from;
int ret;
/* Sanity check */
if (unlikely(len % sizeof desc)) {
vq_err(vq, "Invalid length in indirect descriptor: "
"len 0x%llx not multiple of 0x%zx\n",
(unsigned long long)len,
sizeof desc);
return -EINVAL;
}
ret = translate_desc(vq, vhost64_to_cpu(vq, indirect->addr), len, vq->indirect,
UIO_MAXIOV);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d in indirect.\n", ret);
return ret;
}
iov_iter_init(&from, READ, vq->indirect, ret, len);
/* We will use the result as an address to read from, so most
* architectures only need a compiler barrier here. */
read_barrier_depends();
count = len / sizeof desc;
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
vq_err(vq, "Indirect buffer length too big: %d\n",
indirect->len);
return -E2BIG;
}
do {
unsigned iov_count = *in_num + *out_num;
if (unlikely(++found > count)) {
vq_err(vq, "Loop detected: last one at %u "
"indirect size %u\n",
i, count);
return -EINVAL;
}
if (unlikely(copy_from_iter(&desc, sizeof(desc), &from) !=
sizeof(desc))) {
vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
if (unlikely(desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT))) {
vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
i, (size_t)vhost64_to_cpu(vq, indirect->addr) + i * sizeof desc);
return -EINVAL;
}
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d indirect idx %d\n",
ret, i);
return ret;
}
/* If this is an input descriptor, increment that count. */
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
*in_num += ret;
if (unlikely(log)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
vq_err(vq, "Indirect descriptor "
"has out after in: idx %d\n", i);
return -EINVAL;
}
*out_num += ret;
}
} while ((i = next_desc(vq, &desc)) != -1);
return 0;
}
/* This looks in the virtqueue for the first available buffer, and converts
* it to an iovec for convenient access. Since descriptors consist of some
* number of output then some number of input descriptors, it's actually two
* iovecs, but we pack them into one and note how many of each there were.
*
* This function returns the descriptor number found, or vq->num (which is
* never a valid descriptor number) if none was found. A negative code is
* returned on error. */
int vhost_get_vq_desc(struct vhost_virtqueue *vq,
struct iovec iov[], unsigned int iov_size,
unsigned int *out_num, unsigned int *in_num,
struct vhost_log *log, unsigned int *log_num)
{
struct vring_desc desc;
unsigned int i, head, found = 0;
u16 last_avail_idx;
__virtio16 avail_idx;
__virtio16 ring_head;
int ret;
/* Check it isn't doing very strange things with descriptor numbers. */
last_avail_idx = vq->last_avail_idx;
if (unlikely(__get_user(avail_idx, &vq->avail->idx))) {
vq_err(vq, "Failed to access avail idx at %p\n",
&vq->avail->idx);
return -EFAULT;
}
vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
vq_err(vq, "Guest moved used index from %u to %u",
last_avail_idx, vq->avail_idx);
return -EFAULT;
}
/* If there's nothing new since last we looked, return invalid. */
if (vq->avail_idx == last_avail_idx)
return vq->num;
/* Only get avail ring entries after they have been exposed by guest. */
smp_rmb();
/* Grab the next descriptor number they're advertising, and increment
* the index we've seen. */
if (unlikely(__get_user(ring_head,
&vq->avail->ring[last_avail_idx % vq->num]))) {
vq_err(vq, "Failed to read head: idx %d address %p\n",
last_avail_idx,
&vq->avail->ring[last_avail_idx % vq->num]);
return -EFAULT;
}
head = vhost16_to_cpu(vq, ring_head);
/* If their number is silly, that's an error. */
if (unlikely(head >= vq->num)) {
vq_err(vq, "Guest says index %u > %u is available",
head, vq->num);
return -EINVAL;
}
	/* When we start there are no input or output descriptors. */
*out_num = *in_num = 0;
if (unlikely(log))
*log_num = 0;
i = head;
do {
unsigned iov_count = *in_num + *out_num;
if (unlikely(i >= vq->num)) {
vq_err(vq, "Desc index is %u > %u, head = %u",
i, vq->num, head);
return -EINVAL;
}
if (unlikely(++found > vq->num)) {
vq_err(vq, "Loop detected: last one at %u "
"vq size %u head %u\n",
i, vq->num, head);
return -EINVAL;
}
ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
if (unlikely(ret)) {
vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
i, vq->desc + i);
return -EFAULT;
}
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_INDIRECT)) {
ret = get_indirect(vq, iov, iov_size,
out_num, in_num,
log, log_num, &desc);
if (unlikely(ret < 0)) {
vq_err(vq, "Failure detected "
"in indirect descriptor at idx %d\n", i);
return ret;
}
continue;
}
ret = translate_desc(vq, vhost64_to_cpu(vq, desc.addr),
vhost32_to_cpu(vq, desc.len), iov + iov_count,
iov_size - iov_count);
if (unlikely(ret < 0)) {
vq_err(vq, "Translation failure %d descriptor idx %d\n",
ret, i);
return ret;
}
if (desc.flags & cpu_to_vhost16(vq, VRING_DESC_F_WRITE)) {
/* If this is an input descriptor,
* increment that count. */
*in_num += ret;
if (unlikely(log)) {
log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
log[*log_num].len = vhost32_to_cpu(vq, desc.len);
++*log_num;
}
} else {
/* If it's an output descriptor, they're all supposed
* to come before any input descriptors. */
if (unlikely(*in_num)) {
vq_err(vq, "Descriptor has out after in: "
"idx %d\n", i);
return -EINVAL;
}
*out_num += ret;
}
} while ((i = next_desc(vq, &desc)) != -1);
/* On success, increment avail index. */
vq->last_avail_idx++;
/* Assume notifications from guest are disabled at this point,
* if they aren't we would need to update avail_event index. */
BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
return head;
}
EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
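/* Illustrative consumer loop, not part of the original file: the usual
 * backend pattern around vhost_get_vq_desc(), loosely modelled on vhost-net.
 * example_consume() is hypothetical; the notification dance relies on
 * vhost_disable_notify()/vhost_enable_notify() defined further below. */
#if 0	/* example only */
static void example_handle_vq(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	unsigned int out, in;
	int head;

	vhost_disable_notify(dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;			/* translation/access error */
		if (head == vq->num) {
			/* ring empty: re-arm notifications, then re-check */
			if (unlikely(vhost_enable_notify(dev, vq))) {
				vhost_disable_notify(dev, vq);
				continue;	/* buffers slipped in meanwhile */
			}
			break;
		}
		/* example_consume(vq->iov, out, in);  -- hypothetical */
		vhost_add_used_and_signal(dev, vq, head, 0);
	}
}
#endif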
/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
{
vq->last_avail_idx -= n;
}
EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
/* After we've used one of their buffers, we tell them about it. We'll then
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
struct vring_used_elem heads = {
cpu_to_vhost32(vq, head),
cpu_to_vhost32(vq, len)
};
return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
static int __vhost_add_used_n(struct vhost_virtqueue *vq,
struct vring_used_elem *heads,
unsigned count)
{
struct vring_used_elem __user *used;
u16 old, new;
int start;
start = vq->last_used_idx % vq->num;
used = vq->used->ring + start;
if (count == 1) {
if (__put_user(heads[0].id, &used->id)) {
vq_err(vq, "Failed to write used id");
return -EFAULT;
}
if (__put_user(heads[0].len, &used->len)) {
vq_err(vq, "Failed to write used len");
return -EFAULT;
}
} else if (__copy_to_user(used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Make sure data is seen before log. */
smp_wmb();
/* Log used ring entry write. */
log_write(vq->log_base,
vq->log_addr +
((void __user *)used - (void __user *)vq->used),
count * sizeof *used);
}
old = vq->last_used_idx;
new = (vq->last_used_idx += count);
/* If the driver never bothers to signal in a very long while,
* used index might wrap around. If that happens, invalidate
* signalled_used index we stored. TODO: make sure driver
* signals at least once in 2^16 and remove this. */
if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
vq->signalled_used_valid = false;
return 0;
}
/* After we've used one of their buffers, we tell them about it. We'll then
* want to notify the guest, using eventfd. */
int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
unsigned count)
{
int start, n, r;
start = vq->last_used_idx % vq->num;
n = vq->num - start;
if (n < count) {
r = __vhost_add_used_n(vq, heads, n);
if (r < 0)
return r;
heads += n;
count -= n;
}
r = __vhost_add_used_n(vq, heads, count);
/* Make sure buffer is written before we update index. */
smp_wmb();
if (__put_user(cpu_to_vhost16(vq, vq->last_used_idx), &vq->used->idx)) {
vq_err(vq, "Failed to increment used idx");
return -EFAULT;
}
if (unlikely(vq->log_used)) {
/* Log used index update. */
log_write(vq->log_base,
vq->log_addr + offsetof(struct vring_used, idx),
sizeof vq->used->idx);
if (vq->log_ctx)
eventfd_signal(vq->log_ctx, 1);
}
return r;
}
EXPORT_SYMBOL_GPL(vhost_add_used_n);
static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__u16 old, new;
__virtio16 event;
bool v;
/* Flush out used index updates. This is paired
* with the barrier that the Guest executes when enabling
* interrupts. */
smp_mb();
if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
unlikely(vq->avail_idx == vq->last_avail_idx))
return true;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
__virtio16 flags;
if (__get_user(flags, &vq->avail->flags)) {
vq_err(vq, "Failed to get flags");
return true;
}
return !(flags & cpu_to_vhost16(vq, VRING_AVAIL_F_NO_INTERRUPT));
}
old = vq->signalled_used;
v = vq->signalled_used_valid;
new = vq->signalled_used = vq->last_used_idx;
vq->signalled_used_valid = true;
if (unlikely(!v))
return true;
if (__get_user(event, vhost_used_event(vq))) {
vq_err(vq, "Failed to get used event idx");
return true;
}
return vring_need_event(vhost16_to_cpu(vq, event), new, old);
}
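/* Worked example (added for illustration): with event idx negotiated, say
 * the guest set used_event = 10 and we advanced last_used_idx from old = 9
 * to new = 12. vring_need_event() tests (u16)(new - event - 1) <
 * (u16)(new - old), here 1 < 3, so we signal; for used_event = 13 the left
 * side wraps to 65534 and we stay quiet until the threshold is crossed. */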
/* This actually signals the guest, using eventfd. */
void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
	/* Signal the Guest to tell them we used something up. */
if (vq->call_ctx && vhost_notify(dev, vq))
eventfd_signal(vq->call_ctx, 1);
}
EXPORT_SYMBOL_GPL(vhost_signal);
/* And here's the combo meal deal. Supersize me! */
void vhost_add_used_and_signal(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
unsigned int head, int len)
{
vhost_add_used(vq, head, len);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
/* multi-buffer version of vhost_add_used_and_signal */
void vhost_add_used_and_signal_n(struct vhost_dev *dev,
struct vhost_virtqueue *vq,
struct vring_used_elem *heads, unsigned count)
{
vhost_add_used_n(vq, heads, count);
vhost_signal(dev, vq);
}
EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
/* OK, now we need to know about added descriptors. */
bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
__virtio16 avail_idx;
int r;
if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
return false;
vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r) {
vq_err(vq, "Failed to enable notification at %p: %d\n",
&vq->used->flags, r);
return false;
}
} else {
r = vhost_update_avail_event(vq, vq->avail_idx);
if (r) {
vq_err(vq, "Failed to update avail event index at %p: %d\n",
vhost_avail_event(vq), r);
return false;
}
}
/* They could have slipped one in as we were doing that: make
* sure it's written, then check again. */
smp_mb();
r = __get_user(avail_idx, &vq->avail->idx);
if (r) {
vq_err(vq, "Failed to check avail idx at %p: %d\n",
&vq->avail->idx, r);
return false;
}
return vhost16_to_cpu(vq, avail_idx) != vq->avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_enable_notify);
/* We don't need to be notified again. */
void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
{
int r;
if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
return;
vq->used_flags |= VRING_USED_F_NO_NOTIFY;
if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
r = vhost_update_used_flags(vq);
if (r)
vq_err(vq, "Failed to enable notification at %p: %d\n",
&vq->used->flags, r);
}
}
EXPORT_SYMBOL_GPL(vhost_disable_notify);
static int __init vhost_init(void)
{
return 0;
}
static void __exit vhost_exit(void)
{
}
module_init(vhost_init);
module_exit(vhost_exit);
MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio");
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_1713_0 |
crossvul-cpp_data_good_947_0 | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO GGGGG RRRR IIIII FFFFF Y Y %
% MM MM O O G R R I F Y Y %
% M M M O O G GGG RRRR I FFF Y %
% M M O O G G R R I F Y %
% M M OOO GGGG R R IIIII F Y %
% %
% %
% MagickWand Module Methods %
% %
% Software Design %
% Cristy %
% March 2000 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Use the mogrify program to resize an image, blur, crop, despeckle, dither,
% draw on, flip, join, re-sample, and much more. This tool is similar to
% convert except that the original image file is overwritten (unless you
% change the file suffix with the -format option) with any changes you
% request.
%
*/
/*
Include declarations.
*/
#include "wand/studio.h"
#include "wand/MagickWand.h"
#include "wand/magick-wand-private.h"
#include "wand/mogrify-private.h"
#include "magick/blob-private.h"
#include "magick/color-private.h"
#include "magick/image-private.h"
#include "magick/monitor-private.h"
#include "magick/pixel-private.h"
#include "magick/thread-private.h"
#include "magick/string-private.h"
#include "magick/timer-private.h"
#include "magick/utility-private.h"
#if defined(MAGICKCORE_HAVE_UTIME_H)
#include <utime.h>
#endif
/*
Define declarations.
*/
#define UndefinedCompressionQuality 0UL
/*
Constant declaration.
*/
static const char
MogrifyBackgroundColor[] = "#fff", /* white */
MogrifyBorderColor[] = "#dfdfdf", /* sRGB gray */
MogrifyMatteColor[] = "#bdbdbd"; /* slightly darker gray */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M a g i c k C o m m a n d G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MagickCommandGenesis() applies image processing options to an image as
% prescribed by command line options.
%
% The format of the MagickCommandGenesis method is:
%
% MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
% MagickCommand command,int argc,char **argv,char **metadata,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o command: Choose from ConvertImageCommand, IdentifyImageCommand,
% MogrifyImageCommand, CompositeImageCommand, CompareImageCommand,
% ConjureImageCommand, StreamImageCommand, ImportImageCommand,
% DisplayImageCommand, or AnimateImageCommand.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MagickCommandGenesis(ImageInfo *image_info,
MagickCommand command,int argc,char **argv,char **metadata,
ExceptionInfo *exception)
{
char
*option;
double
duration,
serial;
MagickBooleanType
concurrent,
regard_warnings,
status;
register ssize_t
i;
size_t
iterations,
number_threads;
ssize_t
n;
(void) setlocale(LC_ALL,"");
(void) setlocale(LC_NUMERIC,"C");
concurrent=MagickFalse;
duration=(-1.0);
iterations=1;
status=MagickTrue;
regard_warnings=MagickFalse;
for (i=1; i < (ssize_t) (argc-1); i++)
{
option=argv[i];
if ((strlen(option) == 1) || ((*option != '-') && (*option != '+')))
continue;
if (LocaleCompare("bench",option+1) == 0)
iterations=StringToUnsignedLong(argv[++i]);
if (LocaleCompare("concurrent",option+1) == 0)
concurrent=MagickTrue;
if (LocaleCompare("debug",option+1) == 0)
(void) SetLogEventMask(argv[++i]);
if (LocaleCompare("distribute-cache",option+1) == 0)
{
DistributePixelCacheServer(StringToInteger(argv[++i]),exception);
exit(0);
}
if (LocaleCompare("duration",option+1) == 0)
duration=StringToDouble(argv[++i],(char **) NULL);
if (LocaleCompare("regard-warnings",option+1) == 0)
regard_warnings=MagickTrue;
}
if (iterations == 1)
{
status=command(image_info,argc,argv,metadata,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
return(status);
}
number_threads=GetOpenMPMaximumThreads();
serial=0.0;
for (n=1; n <= (ssize_t) number_threads; n++)
{
double
e,
parallel,
user_time;
TimerInfo
*timer;
(void) SetMagickResourceLimit(ThreadResource,(MagickSizeType) n);
timer=AcquireTimerInfo();
if (concurrent == MagickFalse)
{
for (i=0; i < (ssize_t) iterations; i++)
{
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,metadata,exception);
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
}
}
else
{
SetOpenMPNested(1);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp parallel for shared(status)
#endif
for (i=0; i < (ssize_t) iterations; i++)
{
if (status == MagickFalse)
continue;
if (duration > 0)
{
if (GetElapsedTime(timer) > duration)
continue;
(void) ContinueTimer(timer);
}
status=command(image_info,argc,argv,metadata,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
# pragma omp critical (MagickCore_MagickCommandGenesis)
#endif
{
if (exception->severity != UndefinedException)
{
if ((exception->severity > ErrorException) ||
(regard_warnings != MagickFalse))
status=MagickFalse;
CatchException(exception);
}
if ((metadata != (char **) NULL) && (*metadata != (char *) NULL))
{
(void) fputs(*metadata,stdout);
*metadata=DestroyString(*metadata);
}
}
}
}
user_time=GetUserTime(timer);
parallel=GetElapsedTime(timer);
e=1.0;
if (n == 1)
serial=parallel;
else
e=((1.0/(1.0/((serial/(serial+parallel))+(1.0-(serial/(serial+parallel)))/
(double) n)))-(1.0/(double) n))/(1.0-1.0/(double) n);
(void) FormatLocaleFile(stderr,
" Performance[%.20g]: %.20gi %0.3fips %0.6fe %0.6fu %lu:%02lu.%03lu\n",
(double) n,(double) iterations,(double) iterations/parallel,e,user_time,
(unsigned long) (parallel/60.0),(unsigned long) floor(fmod(parallel,
60.0)),(unsigned long) (1000.0*(parallel-floor(parallel))+0.5));
timer=DestroyTimerInfo(timer);
}
return(status);
}
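/*
  Illustrative sketch (added; not part of this file): a minimal driver for
  MagickCommandGenesis() as documented above. The choice of
  MogrifyImageCommand and the surrounding setup are one plausible usage,
  abbreviated for brevity.
*/
#if 0
static int ExampleGenesis(int argc,char **argv)
{
  char
    *metadata = (char *) NULL;

  ExceptionInfo
    *exception;

  ImageInfo
    *image_info;

  MagickBooleanType
    status;

  MagickCoreGenesis(*argv,MagickTrue);
  exception=AcquireExceptionInfo();
  image_info=AcquireImageInfo();
  status=MagickCommandGenesis(image_info,MogrifyImageCommand,argc,argv,
    &metadata,exception);
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(status != MagickFalse ? 0 : 1);
}
#endif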
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImage() applies simple single image processing options to a single
% image that may be part of a large list, but also handles any 'region'
% image handling.
%
% The image in the list may be modified in three different ways...
%
% * directly modified (EG: -negate, -gamma, -level, -annotate, -draw),
% * replaced by a new image (EG: -spread, -resize, -rotate, -morphology)
% * replace by a list of images (only the -separate option!)
%
% In each case the result is returned into the list, and a pointer to the
% modified image (last image added if replaced by a list of images) is
% returned.
%
% ASIDE: The -crop is present but restricted to non-tile single image crops
%
% This means if all the images are being processed (such as by
% MogrifyImages()), the next image to be processed will be as per the pointer
% (*image)->next. Also the image list may grow as a result of some specific
% operations but as images are never merged or deleted, it will never shrink
% in length. Typically the list will remain the same length.
%
% WARNING: As the image pointed to may be replaced, the first image in the
% list may also change. GetFirstImageInList() should be used by the caller if
% they wish to return the Image pointer to the first image in the list.
%
%
% The format of the MogrifyImage method is:
%
% MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
% const char **argv,Image **image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Image *GetImageCache(const ImageInfo *image_info,const char *path,
ExceptionInfo *exception)
{
char
key[MaxTextExtent];
ExceptionInfo
*sans_exception;
Image
*image;
ImageInfo
*read_info;
/*
    Read an image into an image cache if not already present. Return the image
that is in the cache under that filename.
*/
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",path);
sans_exception=AcquireExceptionInfo();
image=(Image *) GetImageRegistry(ImageRegistryType,key,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
if (image != (Image *) NULL)
return(image);
read_info=CloneImageInfo(image_info);
(void) CopyMagickString(read_info->filename,path,MaxTextExtent);
image=ReadImage(read_info,exception);
read_info=DestroyImageInfo(read_info);
if (image != (Image *) NULL)
(void) SetImageRegistry(ImageRegistryType,key,image,exception);
return(image);
}
static inline MagickBooleanType IsPathWritable(const char *path)
{
if (IsPathAccessible(path) == MagickFalse)
return(MagickFalse);
if (access_utf8(path,W_OK) != 0)
return(MagickFalse);
return(MagickTrue);
}
static MagickBooleanType MonitorProgress(const char *text,
const MagickOffsetType offset,const MagickSizeType extent,
void *wand_unused(client_data))
{
char
message[MaxTextExtent],
tag[MaxTextExtent];
const char
*locale_message;
register char
*p;
wand_unreferenced(client_data);
if ((extent <= 1) || (offset < 0) || (offset >= (MagickOffsetType) extent))
return(MagickTrue);
if ((offset != (MagickOffsetType) (extent-1)) && ((offset % 50) != 0))
return(MagickTrue);
(void) CopyMagickString(tag,text,MaxTextExtent);
p=strrchr(tag,'/');
if (p != (char *) NULL)
*p='\0';
(void) FormatLocaleString(message,MaxTextExtent,"Monitor/%s",tag);
locale_message=GetLocaleMessage(message);
if (locale_message == message)
locale_message=tag;
if (p == (char *) NULL)
(void) FormatLocaleFile(stderr,"%s: %ld of %lu, %02ld%% complete\r",
locale_message,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
else
(void) FormatLocaleFile(stderr,"%s[%s]: %ld of %lu, %02ld%% complete\r",
locale_message,p+1,(long) offset,(unsigned long) extent,(long)
(100L*offset/(extent-1)));
if (offset == (MagickOffsetType) (extent-1))
(void) FormatLocaleFile(stderr,"\n");
(void) fflush(stderr);
return(MagickTrue);
}
static Image *SparseColorOption(const Image *image,const ChannelType channel,
const SparseColorMethod method,const char *arguments,
const MagickBooleanType color_from_image,ExceptionInfo *exception)
{
ChannelType
channels;
char
token[MaxTextExtent];
const char
*p;
double
*sparse_arguments;
Image
*sparse_image;
MagickBooleanType
error;
MagickPixelPacket
color;
register size_t
x;
size_t
number_arguments,
number_colors;
/*
    SparseColorOption() parses the complex -sparse-color argument into an
    array of floating point values then calls SparseColorImage(). Argument is
    a complex mix of floating-point pixel coordinates, and color specifications
(or direct floating point numbers). The number of floats needed to
represent a color varies depending on the current channel setting.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/*
Limit channels according to image - and add up number of color channel.
*/
channels=channel;
if (image->colorspace != CMYKColorspace)
channels=(ChannelType) (channels & ~IndexChannel); /* no index channel */
if (image->matte == MagickFalse)
channels=(ChannelType) (channels & ~OpacityChannel); /* no alpha channel */
number_colors=0;
if ((channels & RedChannel) != 0)
number_colors++;
if ((channels & GreenChannel) != 0)
number_colors++;
if ((channels & BlueChannel) != 0)
number_colors++;
if ((channels & IndexChannel) != 0)
number_colors++;
if ((channels & OpacityChannel) != 0)
number_colors++;
/*
    Read the string to determine the number of arguments needed.
*/
p=arguments;
x=0;
while( *p != '\0' )
{
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == ',' ) continue;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
if ( color_from_image ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color arg given, when colors are coming from image");
return( (Image *) NULL);
}
x += number_colors; /* color argument */
}
else {
x++; /* floating point argument */
}
}
error=MagickTrue;
if ( color_from_image ) {
/* just the control points are being given */
error = ( x % 2 != 0 ) ? MagickTrue : MagickFalse;
number_arguments=(x/2)*(2+number_colors);
}
else {
/* control points and color values */
error = ( x % (2+number_colors) != 0 ) ? MagickTrue : MagickFalse;
number_arguments=x;
}
if ( error ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Invalid number of Arguments");
return( (Image *) NULL);
}
/* Allocate and fill in the floating point arguments */
sparse_arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*sparse_arguments));
if (sparse_arguments == (double *) NULL) {
(void) ThrowMagickException(exception,GetMagickModule(),ResourceLimitError,
" MemoryAllocationFailed\n""%s","SparseColorOption");
return( (Image *) NULL);
}
(void) memset(sparse_arguments,0,number_arguments*
sizeof(*sparse_arguments));
p=arguments;
x=0;
while( *p != '\0' && x < number_arguments ) {
/* X coordinate */
token[0]=',';
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color found, instead of X-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* Y coordinate */
    token[0]=',';
    while ( token[0] == ',' )
      GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError, "InvalidArgument", "`%s': %s", "sparse-color",
"Color found, instead of Y-coord");
error = MagickTrue;
break;
}
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
/* color values for this control point */
#if 0
  if ( color_from_image ) {
/* get color from image */
/* HOW??? */
}
else
#endif
{
/* color name or function given in string argument */
token[0]=',';
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' ) break;
if ( isalpha((int) token[0]) || token[0] == '#' ) {
/* Color string given */
(void) QueryMagickColor(token,&color,exception);
if ( channels & RedChannel )
sparse_arguments[x++] = QuantumScale*color.red;
if ( channels & GreenChannel )
sparse_arguments[x++] = QuantumScale*color.green;
if ( channels & BlueChannel )
sparse_arguments[x++] = QuantumScale*color.blue;
if ( channels & IndexChannel )
sparse_arguments[x++] = QuantumScale*color.index;
if ( channels & OpacityChannel )
sparse_arguments[x++] = QuantumScale*color.opacity;
}
else {
/* Colors given as a set of floating point values - experimental */
/* NB: token contains the first floating point value to use! */
if ( channels & RedChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & GreenChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & BlueChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & IndexChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
if ( channels & OpacityChannel ) {
while ( token[0] == ',' )
GetNextToken(p,&p,MaxTextExtent,token);
if ( token[0] == '\0' || isalpha((int)token[0]) || token[0] == '#' )
break;
sparse_arguments[x++]=StringToDouble(token,(char **) NULL);
token[0] = ','; /* used this token - get another */
}
}
}
}
if ( number_arguments != x && !error ) {
(void) ThrowMagickException(exception,GetMagickModule(),OptionError,
"InvalidArgument","`%s': %s","sparse-color","Argument Parsing Error");
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( (Image *) NULL);
}
if ( error )
return( (Image *) NULL);
/* Call the Interpolation function with the parsed arguments */
sparse_image=SparseColorImage(image,channels,method,number_arguments,
sparse_arguments,exception);
sparse_arguments=(double *) RelinquishMagickMemory(sparse_arguments);
return( sparse_image );
}
WandExport MagickBooleanType MogrifyImage(ImageInfo *image_info,const int argc,
const char **argv,Image **image,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*format,
*option;
DrawInfo
*draw_info;
GeometryInfo
geometry_info;
Image
*region_image;
ImageInfo
*mogrify_info;
MagickStatusType
status;
MagickPixelPacket
fill;
MagickStatusType
flags;
QuantizeInfo
*quantize_info;
RectangleInfo
geometry,
region_geometry;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image **) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (argc < 0)
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
draw_info=CloneDrawInfo(mogrify_info,(DrawInfo *) NULL);
quantize_info=AcquireQuantizeInfo(mogrify_info);
SetGeometryInfo(&geometry_info);
GetMagickPixelPacket(*image,&fill);
SetMagickPixelPacket(*image,&(*image)->background_color,(IndexPacket *) NULL,
&fill);
channel=mogrify_info->channel;
format=GetImageOption(mogrify_info,"format");
SetGeometry(*image,&region_geometry);
region_image=NewImageList();
/*
Transmogrify the image.
*/
for (i=0; i < (ssize_t) argc; i++)
{
Image
*mogrify_image;
ssize_t
count;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=MagickMax(ParseCommandOption(MagickCommandOptions,MagickFalse,option),
0L);
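/*
ParseCommandOption() on MagickCommandOptions reports the number of
trailing arguments this option consumes (presumably encoded in the
option table); options whose arguments would run past argc end the
loop.
*/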
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
mogrify_image=(Image *) NULL;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
/*
Adaptive blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
/*
Adaptive resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=AdaptiveResizeImage(*image,geometry.width,
geometry.height,exception);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
/*
Adaptive sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=AdaptiveSharpenImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
/*
Affine matrix.
*/
if (*option == '+')
{
GetAffineMatrix(&draw_info->affine);
break;
}
(void) ParseAffineGeometry(argv[i+1],&draw_info->affine,exception);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
AlphaChannelType
alpha_type;
(void) SyncImageSettings(mogrify_info,*image);
alpha_type=(AlphaChannelType) ParseCommandOption(MagickAlphaOptions,
MagickFalse,argv[i+1]);
(void) SetImageAlphaChannel(*image,alpha_type);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
char
*text,
geometry[MaxTextExtent];
/*
Annotate image.
*/
(void) SyncImageSettings(mogrify_info,*image);
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
text=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (text == (char *) NULL)
break;
(void) CloneString(&draw_info->text,text);
text=DestroyString(text);
(void) FormatLocaleString(geometry,MaxTextExtent,"%+f%+f",
geometry_info.xi,geometry_info.psi);
(void) CloneString(&draw_info->geometry,geometry);
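/*
Build the text transform from the parsed angles: rho rotates the
x-axis and sigma the y-axis, so equal angles give a pure rotation
while differing angles shear the glyphs.
*/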
draw_info->affine.sx=cos(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.rx=sin(DegreesToRadians(
fmod(geometry_info.rho,360.0)));
draw_info->affine.ry=(-sin(DegreesToRadians(
fmod(geometry_info.sigma,360.0))));
draw_info->affine.sy=cos(DegreesToRadians(
fmod(geometry_info.sigma,360.0)));
(void) AnnotateImage(*image,draw_info);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
draw_info->stroke_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
draw_info->text_antialias=(*option == '-') ? MagickTrue :
MagickFalse;
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
{
/*
Automatically adjust the gamma of the image based on its mean.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) AutoGammaImageChannel(*image,channel);
break;
}
if (LocaleCompare("auto-level",option+1) == 0)
{
/*
Perfectly normalize (max/min stretch) the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) AutoLevelImageChannel(*image,channel);
break;
}
if (LocaleCompare("auto-orient",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=AutoOrientImage(*image,(*image)->orientation,
exception);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("black-threshold",option+1) == 0)
{
/*
Black threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) BlackThresholdImageChannel(*image,channel,argv[i+1],
exception);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
/*
Blue shift image.
*/
(void) SyncImageSettings(mogrify_info,*image);
geometry_info.rho=1.5;
if (*option == '-')
flags=ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=BlueShiftImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=BlurImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
/*
Surround image with a border of solid color.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=BorderImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase(MogrifyBorderColor,
&draw_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&draw_info->border_color,
exception);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
(void) QueryColorDatabase(argv[i+1],&draw_info->undercolor,
exception);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
double
brightness,
contrast;
GeometryInfo
geometry_info;
MagickStatusType
flags;
/*
Brightness / contrast image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
brightness=geometry_info.rho;
contrast=0.0;
if ((flags & SigmaValue) != 0)
contrast=geometry_info.sigma;
(void) BrightnessContrastImageChannel(*image,channel,brightness,
contrast);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("canny",option+1) == 0)
{
/*
Detect edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.30;
if ((flags & PercentValue) != 0)
{
geometry_info.xi/=100.0;
geometry_info.psi/=100.0;
}
mogrify_image=CannyEdgeImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,geometry_info.psi,exception);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
char
*color_correction_collection;
/*
Color correct with a color decision list.
*/
(void) SyncImageSettings(mogrify_info,*image);
color_correction_collection=FileToString(argv[i+1],~0UL,exception);
if (color_correction_collection == (char *) NULL)
break;
(void) ColorDecisionListImage(*image,color_correction_collection);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
channel=DefaultChannels;
else
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
/*
Charcoal image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=CharcoalImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
/*
Chop the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ChopImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
{
/*
Clamp image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ClampImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) SetImageClipMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
(void) ClipImage(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip-mask",option+1) == 0)
{
CacheView
*mask_view;
Image
*mask_image;
register PixelPacket
*magick_restrict q;
register ssize_t
x;
ssize_t
y;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
/*
Set the image mask.
FUTURE: this should be a SetImageAlphaChannel() call, or two.
*/
mask_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask_image == (Image *) NULL)
break;
if (SetImageStorageClass(mask_image,DirectClass) == MagickFalse)
return(MagickFalse);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
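/*
Fold the mask image into the form the clip mask uses: for mattless
masks the pixel intensity becomes the opacity, and the opacity is
then mirrored into the RGB channels so the mask is self-consistent.
*/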
for (y=0; y < (ssize_t) mask_image->rows; y++)
{
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if (q == (PixelPacket *) NULL)
break;
for (x=0; x < (ssize_t) mask_image->columns; x++)
{
if (mask_image->matte == MagickFalse)
SetPixelOpacity(q,ClampToQuantum(GetPixelIntensity(mask_image,
q)));
SetPixelRed(q,GetPixelOpacity(q));
SetPixelGreen(q,GetPixelOpacity(q));
SetPixelBlue(q,GetPixelOpacity(q));
q++;
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
break;
}
mask_view=DestroyCacheView(mask_view);
mask_image->matte=MagickTrue;
(void) SetImageClipMask(*image,mask_image);
mask_image=DestroyImage(mask_image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("clip-path",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ClipImagePath(*image,argv[i+1],*option == '-' ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("colorize",option+1) == 0)
{
/*
Colorize the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=ColorizeImage(*image,argv[i+1],draw_info->fill,
exception);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image);
kernel=AcquireKernelInfo(argv[i+1]);
if (kernel == (KernelInfo *) NULL)
break;
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
/*
Reduce the number of colors in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
quantize_info->number_colors=StringToUnsignedLong(argv[i+1]);
if (quantize_info->number_colors == 0)
break;
if (((*image)->storage_class == DirectClass) ||
(*image)->colors > quantize_info->number_colors)
(void) QuantizeImage(quantize_info,*image);
else
(void) CompressImageColormap(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ColorspaceType
colorspace;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) TransformImageColorspace(*image,sRGBColorspace);
InheritException(exception,&(*image)->exception);
break;
}
colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) TransformImageColorspace(*image,colorspace);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("connected-components",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=ConnectedComponentsImage(*image,
StringToInteger(argv[i+1]),exception);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ContrastImage(*image,(*option == '-') ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
/*
Contrast stretch image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(flags & SigmaValue) != 0 ? geometry_info.sigma :
black_point;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
white_point=(MagickRealType) (*image)->columns*(*image)->rows-
white_point;
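/*
The points are histogram pixel counts: black_point pixels saturate at
the dark end and white_point (given from the bright end, hence the
subtraction above) at the light end.
*/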
(void) ContrastStretchImageChannel(*image,channel,black_point,
white_point);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
double
gamma;
KernelInfo
*kernel_info;
register ssize_t
j;
size_t
extent;
(void) SyncImageSettings(mogrify_info,*image);
kernel_info=AcquireKernelInfo(argv[i+1]);
if (kernel_info == (KernelInfo *) NULL)
break;
extent=kernel_info->width*kernel_info->height;
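/*
Normalize the kernel so its coefficients sum to 1 and overall
brightness is preserved; a (near-)zero sum is left unscaled to avoid
dividing by zero.
*/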
gamma=0.0;
for (j=0; j < (ssize_t) extent; j++)
gamma+=kernel_info->values[j];
gamma=1.0/(fabs((double) gamma) <= MagickEpsilon ? 1.0 : gamma);
for (j=0; j < (ssize_t) extent; j++)
kernel_info->values[j]*=gamma;
mogrify_image=MorphologyImage(*image,CorrelateMorphology,1,
kernel_info,exception);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
/*
Crop an image to a smaller size.
*/
(void) SyncImageSettings(mogrify_info,*image);
#if 0
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (((geometry.width != 0) || (geometry.height != 0)) &&
((flags & XValue) == 0) && ((flags & YValue) == 0))
break;
#endif
#if 0
mogrify_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception);
mogrify_image->next = mogrify_image->previous = (Image *) NULL;
(void) TransformImage(&mogrify_image,argv[i+1],(char *) NULL);
InheritException(exception,&mogrify_image->exception);
#else
mogrify_image=CropImageToTiles(*image,argv[i+1],exception);
#endif
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
/*
Cycle an image colormap.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CycleColormapImage(*image,(ssize_t) StringToLong(argv[i+1]));
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Decipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyDecipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
(void) CloneString(&draw_info->density,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
(void) SetImageDepth(*image,MAGICKCORE_QUANTUM_DEPTH);
break;
}
(void) SetImageDepth(*image,StringToUnsignedLong(argv[i+1]));
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
double
threshold;
/*
Straighten the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
threshold=40.0*QuantumRange/100.0;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=DeskewImage(*image,threshold,exception);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
{
/*
Reduce the speckles within an image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=DespeckleImage(*image,exception);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
(void) CloneString(&draw_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
DistortImageMethod
method;
double
*arguments;
register ssize_t
x;
size_t
number_arguments;
/*
Distort image.
*/
(void) SyncImageSettings(mogrify_info,*image);
method=(DistortImageMethod) ParseCommandOption(MagickDistortOptions,
MagickFalse,argv[i+1]);
if (method == ResizeDistortion)
{
double
resize_args[2];
/*
Resize distortion.
*/
(void) ParseRegionGeometry(*image,argv[i+2],&geometry,
exception);
resize_args[0]=(double) geometry.width;
resize_args[1]=(double) geometry.height;
mogrify_image=DistortImage(*image,method,(size_t) 2,
resize_args,MagickTrue,exception);
break;
}
args=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (args == (char *) NULL)
break;
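/*
Two passes over the argument string: the first only counts numeric
tokens (commas are separators, not values) so exactly enough memory
can be acquired, the second parses the doubles.
*/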
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(arguments,0,number_arguments*sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
mogrify_image=DistortImage(*image,method,number_arguments,arguments,
(*option == '+') ? MagickTrue : MagickFalse,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
if (quantize_info->dither_method == NoDitherMethod)
quantize_info->dither=MagickFalse;
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
/*
Draw image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CloneString(&draw_info->primitive,argv[i+1]);
(void) DrawImage(*image,draw_info);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
/*
Enhance edges in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EdgeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
/*
Gaussian emboss image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=EmbossImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
StringInfo
*passkey;
/*
Encipher pixels.
*/
(void) SyncImageSettings(mogrify_info,*image);
passkey=FileToStringInfo(argv[i+1],~0UL,exception);
if (passkey != (StringInfo *) NULL)
{
(void) PasskeyEncipherImage(*image,passkey,exception);
passkey=DestroyStringInfo(passkey);
}
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
(void) CloneString(&draw_info->encoding,argv[i+1]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
{
/*
Enhance image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=EnhanceImage(*image,exception);
break;
}
if (LocaleCompare("equalize",option+1) == 0)
{
/*
Equalize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) EqualizeImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("evaluate",option+1) == 0)
{
double
constant;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*image);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
constant=StringToDoubleInterval(argv[i+2],(double) QuantumRange+
1.0);
(void) EvaluateImageChannel(*image,channel,op,constant,exception);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
/*
Set the image extent.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
if (geometry.width == 0)
geometry.width=(*image)->columns;
if (geometry.height == 0)
geometry.height=(*image)->rows;
mogrify_image=ExtentImage(*image,&geometry,exception);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->family != (char *) NULL)
draw_info->family=DestroyString(draw_info->family);
break;
}
(void) CloneString(&draw_info->family,argv[i+1]);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:features");
break;
}
(void) SetImageArtifact(*image,"identify:features",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
ExceptionInfo
*sans;
GetMagickPixelPacket(*image,&fill);
if (*option == '+')
{
(void) QueryMagickColor("none",&fill,exception);
(void) QueryColorDatabase("none",&draw_info->fill,exception);
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
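/*
Probe the argument as a color using a throwaway exception; if it does
not parse as a color, fall back to loading it as a fill pattern
image.
*/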
sans=AcquireExceptionInfo();
(void) QueryMagickColor(argv[i+1],&fill,sans);
status=QueryColorDatabase(argv[i+1],&draw_info->fill,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("flip",option+1) == 0)
{
/*
Flip image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=FlipImage(*image,exception);
break;
}
if (LocaleCompare("floodfill",option+1) == 0)
{
MagickPixelPacket
target;
/*
Floodfill image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) QueryMagickColor(argv[i+2],&target,exception);
(void) FloodfillPaintImage(*image,channel,draw_info,&target,
geometry.x,geometry.y,*option == '-' ? MagickFalse : MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("flop",option+1) == 0)
{
/*
Flop image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=FlopImage(*image,exception);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->font != (char *) NULL)
draw_info->font=DestroyString(draw_info->font);
break;
}
(void) CloneString(&draw_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
format=argv[i+1];
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
FrameInfo
frame_info;
/*
Surround image with an ornamental border.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
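/*
The parsed width/height give the border thickness and x/y the
outer/inner bevels; the final frame is the image plus the border on
every side.
*/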
frame_info.width=geometry.width;
frame_info.height=geometry.height;
frame_info.outer_bevel=geometry.x;
frame_info.inner_bevel=geometry.y;
frame_info.x=(ssize_t) frame_info.width;
frame_info.y=(ssize_t) frame_info.height;
frame_info.width=(*image)->columns+2*frame_info.width;
frame_info.height=(*image)->rows+2*frame_info.height;
mogrify_image=FrameImage(*image,&frame_info,exception);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
char
*arguments,
token[MaxTextExtent];
const char
*p;
double
*parameters;
MagickFunction
function;
register ssize_t
x;
size_t
number_parameters;
/*
Apply a function to modify the image values.
*/
(void) SyncImageSettings(mogrify_info,*image);
function=(MagickFunction) ParseCommandOption(MagickFunctionOptions,
MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (arguments == (char *) NULL)
break;
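/*
Same two-pass tokenize-then-parse scheme as -distort above: count the
numeric parameters first, then convert them.
*/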
p=(char *) arguments;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_parameters=(size_t) x;
parameters=(double *) AcquireQuantumMemory(number_parameters,
sizeof(*parameters));
if (parameters == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*image)->filename);
(void) memset(parameters,0,number_parameters*
sizeof(*parameters));
p=(char *) arguments;
for (x=0; (x < (ssize_t) number_parameters) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
parameters[x]=StringToDouble(token,(char **) NULL);
}
arguments=DestroyString(arguments);
(void) FunctionImageChannel(*image,channel,function,
number_parameters,parameters,exception);
parameters=(double *) RelinquishMagickMemory(parameters);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
/*
Gamma image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
(*image)->gamma=StringToDouble(argv[i+1],(char **) NULL);
else
{
if (strchr(argv[i+1],',') != (char *) NULL)
(void) GammaImage(*image,argv[i+1]);
else
(void) GammaImageChannel(*image,channel,
StringToDouble(argv[i+1],(char **) NULL));
InheritException(exception,&(*image)->exception);
}
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
/*
Gaussian blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=GaussianBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
/*
Record Image offset, Resize last image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
if ((*image)->geometry != (char *) NULL)
(*image)->geometry=DestroyString((*image)->geometry);
break;
}
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) CloneString(&(*image)->geometry,argv[i+1]);
else
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
draw_info->gravity=UndefinedGravity;
break;
}
draw_info->gravity=(GravityType) ParseCommandOption(
MagickGravityOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
PixelIntensityMethod
method;
(void) SyncImageSettings(mogrify_info,*image);
method=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,argv[i+1]);
(void) GrayscaleImage(*image,method);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'h':
{
if (LocaleCompare("highlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:highlight-color",argv[i+1]);
break;
}
if (LocaleCompare("hough-lines",option+1) == 0)
{
/*
Identify lines in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=40;
mogrify_image=HoughLineImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
{
char
*text;
(void) SyncImageSettings(mogrify_info,*image);
if (format == (char *) NULL)
{
(void) IdentifyImage(*image,stdout,mogrify_info->verbose);
InheritException(exception,&(*image)->exception);
break;
}
text=InterpretImageProperties(mogrify_info,*image,format);
InheritException(exception,&(*image)->exception);
if (text == (char *) NULL)
break;
(void) fputs(text,stdout);
text=DestroyString(text);
break;
}
if (LocaleCompare("implode",option+1) == 0)
{
/*
Implode image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=ImplodeImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interline_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->interword_spacing=geometry_info.rho;
break;
}
if (LocaleCompare("interpolative-resize",option+1) == 0)
{
/*
Resize image using 'point sampled' interpolation
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=InterpolativeResizeImage(*image,geometry.width,
geometry.height,(*image)->interpolate,exception);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("0",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->kerning=geometry_info.rho;
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
/*
Edge preserving blur.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho-0.5;
mogrify_image=KuwaharaImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("lat",option+1) == 0)
{
/*
Local adaptive threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=AdaptiveThresholdImage(*image,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,(ssize_t)
geometry_info.xi,exception);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
MagickRealType
black_point,
gamma,
white_point;
MagickStatusType
flags;
/*
Parse levels.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(MagickRealType) QuantumRange;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
gamma=1.0;
if ((flags & XiValue) != 0)
gamma=geometry_info.xi;
if ((flags & PercentValue) != 0)
{
black_point*=(MagickRealType) (QuantumRange/100.0);
white_point*=(MagickRealType) (QuantumRange/100.0);
}
if ((flags & SigmaValue) == 0)
white_point=(MagickRealType) QuantumRange-black_point;
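/*
'+level' (or the '!' flag) requests the inverse mapping:
LevelizeImageChannel() compresses the full range into the given
black..white band instead of stretching that band to the full range.
*/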
if ((*option == '+') || ((flags & AspectValue) != 0))
(void) LevelizeImageChannel(*image,channel,black_point,
white_point,gamma);
else
(void) LevelImageChannel(*image,channel,black_point,white_point,
gamma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
char
token[MaxTextExtent];
const char
*p;
MagickPixelPacket
black_point,
white_point;
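/*
Parse "black_color[,white_color]": a missing black point defaults to
#000000 and a missing white point to #ffffff, while a single color
levels everything to that color.
*/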
p=(const char *) argv[i+1];
GetNextToken(p,&p,MaxTextExtent,token); /* get black point color */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryMagickColor(token,&black_point,exception);
else
(void) QueryMagickColor("#000000",&black_point,exception);
if (isalpha((int) token[0]) || (token[0] == '#'))
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == '\0')
white_point=black_point; /* set everything to that color */
else
{
if ((isalpha((int) *token) == 0) && ((*token == '#') == 0))
GetNextToken(p,&p,MaxTextExtent,token); /* Get white point color. */
if ((isalpha((int) *token) != 0) || ((*token == '#') != 0))
(void) QueryMagickColor(token,&white_point,exception);
else
(void) QueryMagickColor("#ffffff",&white_point,exception);
}
(void) LevelColorsImageChannel(*image,channel,&black_point,
&white_point,*option == '+' ? MagickTrue : MagickFalse);
break;
}
if (LocaleCompare("linear-stretch",option+1) == 0)
{
double
black_point,
white_point;
MagickStatusType
flags;
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
black_point=geometry_info.rho;
white_point=(MagickRealType) (*image)->columns*(*image)->rows;
if ((flags & SigmaValue) != 0)
white_point=geometry_info.sigma;
if ((flags & PercentValue) != 0)
{
black_point*=(double) (*image)->columns*(*image)->rows/100.0;
white_point*=(double) (*image)->columns*(*image)->rows/100.0;
}
if ((flags & SigmaValue) == 0)
white_point=(MagickRealType) (*image)->columns*(*image)->rows-
black_point;
(void) LinearStretchImage(*image,black_point,white_point);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("linewidth",option+1) == 0)
{
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
/*
Liquid rescale image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & XValue) == 0)
geometry.x=1;
if ((flags & YValue) == 0)
geometry.y=0;
mogrify_image=LiquidRescaleImage(*image,geometry.width,
geometry.height,1.0*geometry.x,1.0*geometry.y,exception);
break;
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
MagickStatusType
flags;
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & RhoValue) == 0)
geometry_info.rho=10;
if ((flags & SigmaValue) == 0)
geometry_info.sigma=12.5;
mogrify_image=LocalContrastImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("lowlight-color",option+1) == 0)
{
(void) SetImageArtifact(*image,"compare:lowlight-color",argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
{
/*
Double image size.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=MagnifyImage(*image,exception);
break;
}
if (LocaleCompare("map",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image);
InheritException(exception,&(*image)->exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
Image
*mask;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a mask.
*/
(void) SetImageMask(*image,(Image *) NULL);
InheritException(exception,&(*image)->exception);
break;
}
/*
Set the image mask.
*/
mask=GetImageCache(mogrify_info,argv[i+1],exception);
if (mask == (Image *) NULL)
break;
(void) SetImageMask(*image,mask);
mask=DestroyImage(mask);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("matte",option+1) == 0)
{
(void) SetImageAlphaChannel(*image,(*option == '-') ?
SetAlphaChannel : DeactivateAlphaChannel );
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("mean-shift",option+1) == 0)
{
/*
Delineate arbitrarily shaped clusters in the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
if ((flags & XiValue) == 0)
geometry_info.xi=0.10*QuantumRange;
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=MeanShiftImage(*image,(size_t) geometry_info.rho,
(size_t) geometry_info.sigma,(size_t) geometry_info.xi,exception);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
/*
Median filter image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,MedianStatistic,
(size_t) geometry_info.rho,(size_t) geometry_info.rho,exception);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
/*
Mode image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,ModeStatistic,
(size_t) geometry_info.rho,(size_t) geometry_info.rho,exception);
break;
}
if (LocaleCompare("modulate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ModulateImage(*image,argv[i+1]);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("moments",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:moments");
break;
}
(void) SetImageArtifact(*image,"identify:moments",argv[i+1]);
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageProgressMonitor(*image,
(MagickProgressMonitor) NULL,(void *) NULL);
break;
}
(void) SetImageProgressMonitor(*image,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) SetImageType(*image,BilevelType);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MaxTextExtent];
const char
*p;
KernelInfo
*kernel;
MorphologyMethod
method;
ssize_t
iterations;
/*
Morphological Image Operation
*/
(void) SyncImageSettings(mogrify_info,*image);
p=argv[i+1];
GetNextToken(p,&p,MaxTextExtent,token);
method=(MorphologyMethod) ParseCommandOption(
MagickMorphologyOptions,MagickFalse,token);
iterations=1L;
GetNextToken(p,&p,MaxTextExtent,token);
if ((*p == ':') || (*p == ','))
GetNextToken(p,&p,MaxTextExtent,token);
if ((*p != '\0'))
iterations=(ssize_t) StringToLong(p);
kernel=AcquireKernelInfo(argv[i+2]);
if (kernel == (KernelInfo *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"UnabletoParseKernel","morphology");
status=MagickFalse;
break;
}
mogrify_image=MorphologyImageChannel(*image,channel,method,
iterations,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("motion-blur",option+1) == 0)
{
/*
Motion blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=MotionBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception);
break;
}
break;
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) NegateImageChannel(*image,channel,*option == '+' ?
MagickTrue : MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("noise",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '-')
{
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=StatisticImageChannel(*image,channel,
NonpeakStatistic,(size_t) geometry_info.rho,(size_t)
geometry_info.sigma,exception);
}
else
{
NoiseType
noise;
noise=(NoiseType) ParseCommandOption(MagickNoiseOptions,
MagickFalse,argv[i+1]);
mogrify_image=AddNoiseImageChannel(*image,channel,noise,
exception);
}
break;
}
if (LocaleCompare("normalize",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) NormalizeImageChannel(*image,channel);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
MagickPixelPacket
target;
(void) SyncImageSettings(mogrify_info,*image);
(void) QueryMagickColor(argv[i+1],&target,exception);
(void) OpaquePaintImageChannel(*image,channel,&target,&fill,
*option == '-' ? MagickFalse : MagickTrue);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) OrderedPosterizeImageChannel(*image,channel,argv[i+1],
exception);
break;
}
break;
}
case 'p':
{
if (LocaleCompare("paint",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=OilPaintImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&draw_info->fill,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&draw_info->fill,exception);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
/*
Perceptible image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) PerceptibleImageChannel(*image,channel,StringToDouble(
argv[i+1],(char **) NULL));
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
(void) ParseGeometry("12",&geometry_info);
else
(void) ParseGeometry(argv[i+1],&geometry_info);
draw_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
double
angle;
RandomInfo
*random_info;
/*
Simulate a Polaroid picture.
*/
(void) SyncImageSettings(mogrify_info,*image);
random_info=AcquireRandomInfo();
angle=22.5*(GetPseudoRandomValue(random_info)-0.5);
random_info=DestroyRandomInfo(random_info);
if (*option == '-')
{
SetGeometryInfo(&geometry_info);
flags=ParseGeometry(argv[i+1],&geometry_info);
angle=geometry_info.rho;
}
mogrify_image=PolaroidImage(*image,draw_info,angle,exception);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
/*
Posterize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) PosterizeImage(*image,StringToUnsignedLong(argv[i+1]),
quantize_info->dither);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
PreviewType
preview_type;
/*
Preview image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
preview_type=UndefinedPreview;
else
preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
mogrify_image=PreviewImage(*image,preview_type,exception);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
const char
*name;
const StringInfo
*profile;
Image
*profile_image;
ImageInfo
*profile_info;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
{
/*
Remove a profile from the image.
*/
(void) ProfileImage(*image,argv[i+1],(const unsigned char *)
NULL,0,MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
/*
Associate a profile with the image.
*/
profile_info=CloneImageInfo(mogrify_info);
profile=GetImageProfile(*image,"iptc");
if (profile != (StringInfo *) NULL)
profile_info->profile=(void *) CloneStringInfo(profile);
profile_image=GetImageCache(profile_info,argv[i+1],exception);
profile_info=DestroyImageInfo(profile_info);
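/*
If the argument cannot be read as an image, treat it as a raw profile
blob: SetImageInfo() presumably deduces the profile type from the
filename (e.g. "icc:profile.icc", an illustrative name) and the file
contents are attached directly.
*/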
if (profile_image == (Image *) NULL)
{
StringInfo
*profile;
profile_info=CloneImageInfo(mogrify_info);
(void) CopyMagickString(profile_info->filename,argv[i+1],
MaxTextExtent);
profile=FileToStringInfo(profile_info->filename,~0UL,exception);
if (profile != (StringInfo *) NULL)
{
(void) SetImageInfo(profile_info,0,exception);
(void) ProfileImage(*image,profile_info->magick,
GetStringInfoDatum(profile),(size_t)
GetStringInfoLength(profile),MagickFalse);
profile=DestroyStringInfo(profile);
}
profile_info=DestroyImageInfo(profile_info);
break;
}
ResetImageProfileIterator(profile_image);
name=GetNextImageProfile(profile_image);
while (name != (const char *) NULL)
{
profile=GetImageProfile(profile_image,name);
if (profile != (StringInfo *) NULL)
(void) ProfileImage(*image,name,GetStringInfoDatum(profile),
(size_t) GetStringInfoLength(profile),MagickFalse);
name=GetNextImageProfile(profile_image);
}
profile_image=DestroyImage(profile_image);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quantize",option+1) == 0)
{
if (*option == '+')
{
quantize_info->colorspace=UndefinedColorspace;
break;
}
quantize_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("radial-blur",option+1) == 0 ||
LocaleCompare("rotational-blur",option+1) == 0)
{
/*
Radial blur image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=RotationalBlurImageChannel(*image,channel,
StringToDouble(argv[i+1],(char **) NULL),exception);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
/*
Surround image with a raise of solid color.
*/
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
(void) RaiseImage(*image,&geometry,*option == '-' ? MagickTrue :
MagickFalse);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) RandomThresholdImageChannel(*image,channel,argv[i+1],
exception);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
KernelInfo
*kernel;
(void) SyncImageSettings(mogrify_info,*image);
kernel=AcquireKernelInfo(argv[i+1]);
if (kernel == (KernelInfo *) NULL)
break;
mogrify_image=ColorMatrixImage(*image,kernel,exception);
kernel=DestroyKernelInfo(kernel);
break;
}
if (LocaleCompare("region",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
if (region_image != (Image *) NULL)
{
/*
Composite region.
*/
(void) CompositeImage(region_image,region_image->matte !=
MagickFalse ? CopyCompositeOp : OverCompositeOp,*image,
region_geometry.x,region_geometry.y);
InheritException(exception,&region_image->exception);
*image=DestroyImage(*image);
*image=region_image;
region_image=(Image *) NULL;
}
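/*
Any previously selected region is composited back into the saved full
image before a new region is cropped out; a bare '+region' therefore
just restores the full image.
*/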
if (*option == '+')
break;
/*
Apply transformations to a selected region of the image.
*/
(void) ParseGravityGeometry(*image,argv[i+1],&region_geometry,
exception);
mogrify_image=CropImage(*image,&region_geometry,exception);
if (mogrify_image == (Image *) NULL)
break;
region_image=(*image);
*image=mogrify_image;
mogrify_image=(Image *) NULL;
break;
}
if (LocaleCompare("render",option+1) == 0)
{
(void) SyncImageSettings(mogrify_info,*image);
draw_info->render=(*option == '+') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
Image
*remap_image;
/*
Transform image colors to match this set of colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
break;
remap_image=GetImageCache(mogrify_info,argv[i+1],exception);
if (remap_image == (Image *) NULL)
break;
(void) RemapImage(quantize_info,*image,remap_image);
InheritException(exception,&(*image)->exception);
remap_image=DestroyImage(remap_image);
break;
}
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
{
(void) ParseAbsoluteGeometry("0x0+0+0",&(*image)->page);
break;
}
(void) ResetImagePage(*image,argv[i+1]);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
/*
Resample image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ResampleImage(*image,geometry_info.rho,
geometry_info.sigma,(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ResizeImage(*image,geometry.width,geometry.height,
(*image)->filter,(*image)->blur,exception);
break;
}
if (LocaleCompare("roll",option+1) == 0)
{
/*
Roll image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
if ((flags & PercentValue) != 0)
{
geometry.x*=(double) (*image)->columns/100.0;
geometry.y*=(double) (*image)->rows/100.0;
}
mogrify_image=RollImage(*image,geometry.x,geometry.y,exception);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
char
*geometry;
/*
Check for conditional image rotation.
*/
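/*
A '>' suffix rotates only landscape images (columns > rows) and '<'
only portrait ones; otherwise the option is a no-op.
*/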
(void) SyncImageSettings(mogrify_info,*image);
if (strchr(argv[i+1],'>') != (char *) NULL)
if ((*image)->columns <= (*image)->rows)
break;
if (strchr(argv[i+1],'<') != (char *) NULL)
if ((*image)->columns >= (*image)->rows)
break;
/*
Rotate image.
*/
geometry=ConstantString(argv[i+1]);
(void) SubstituteString(&geometry,">","");
(void) SubstituteString(&geometry,"<","");
(void) ParseGeometry(geometry,&geometry_info);
geometry=DestroyString(geometry);
mogrify_image=RotateImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
/*
Sample image with pixel replication.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SampleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
/*
Resize image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ScaleImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
/*
Selectively blur pixels within a contrast threshold.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
geometry_info.xi=(double) QuantumRange*geometry_info.xi/100.0;
mogrify_image=SelectiveBlurImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("separate",option+1) == 0)
{
/*
Break channels into separate images.
WARNING: This can generate multiple images!
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=SeparateImages(*image,channel,exception);
break;
}
if (LocaleCompare("sepia-tone",option+1) == 0)
{
double
threshold;
/*
Sepia-tone image.
*/
(void) SyncImageSettings(mogrify_info,*image);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
mogrify_image=SepiaToneImage(*image,threshold,exception);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
/*
Segment image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
(void) SegmentImage(*image,(*image)->colorspace,
mogrify_info->verbose,geometry_info.rho,geometry_info.sigma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
char
*value;
/*
Set image option.
*/
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) DeleteImageOption(mogrify_info,argv[i+1]+7);
(void) DeleteImageArtifact(*image,argv[i+1]+7);
}
else
(void) DeleteImageProperty(*image,argv[i+1]);
break;
}
value=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (value == (char *) NULL)
break;
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) SetImageRegistry(StringRegistryType,argv[i+1]+9,value,
exception);
else
if (LocaleNCompare(argv[i+1],"option:",7) == 0)
{
(void) SetImageOption(image_info,argv[i+1]+7,value);
(void) SetImageOption(mogrify_info,argv[i+1]+7,value);
(void) SetImageArtifact(*image,argv[i+1]+7,value);
}
else
(void) SetImageProperty(*image,argv[i+1],value);
value=DestroyString(value);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
/*
Shade image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=ShadeImage(*image,(*option == '-') ? MagickTrue :
MagickFalse,geometry_info.rho,geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
/*
Shadow image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=4.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=4.0;
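/*
ceil(value-0.5) rounds the x/y offsets to the nearest whole pixel
(halves round down) before they are passed as pixel offsets.
*/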
mogrify_image=ShadowImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
/*
Sharpen image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SharpenImageChannel(*image,channel,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
/*
Shave the image edges.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParsePageGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ShaveImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
/*
Shear image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=geometry_info.rho;
mogrify_image=ShearImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
/*
Sigmoidal non-linearity contrast control.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=(double) QuantumRange/2.0;
if ((flags & PercentValue) != 0)
geometry_info.sigma=(double) QuantumRange*geometry_info.sigma/
100.0;
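/*
rho is the contrast strength and sigma the sigmoid midpoint
(defaulting to mid-range); '+sigmoidal-contrast' applies the inverse
transfer function.
*/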
(void) SigmoidalContrastImageChannel(*image,channel,
(*option == '-') ? MagickTrue : MagickFalse,geometry_info.rho,
geometry_info.sigma);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
/*
Sketch image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=SketchImage(*image,geometry_info.rho,
geometry_info.sigma,geometry_info.xi,exception);
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
double
threshold;
(void) SyncImageSettings(mogrify_info,*image);
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) SolarizeImageChannel(*image,channel,threshold,exception);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
SparseColorMethod
method;
char
*arguments;
/*
Sparse Color Interpolated Gradient
*/
(void) SyncImageSettings(mogrify_info,*image);
method=(SparseColorMethod) ParseCommandOption(
MagickSparseColorOptions,MagickFalse,argv[i+1]);
arguments=InterpretImageProperties(mogrify_info,*image,argv[i+2]);
InheritException(exception,&(*image)->exception);
if (arguments == (char *) NULL)
break;
mogrify_image=SparseColorOption(*image,channel,method,arguments,
option[0] == '+' ? MagickTrue : MagickFalse,exception);
arguments=DestroyString(arguments);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
/*
Splice a solid color into the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGravityGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=SpliceImage(*image,&geometry,exception);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
/*
Spread an image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SpreadImage(*image,geometry_info.rho,exception);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
StatisticType
type;
(void) SyncImageSettings(mogrify_info,*image);
type=(StatisticType) ParseCommandOption(MagickStatisticOptions,
MagickFalse,argv[i+1]);
(void) ParseGeometry(argv[i+2],&geometry_info);
mogrify_image=StatisticImageChannel(*image,channel,type,(size_t)
geometry_info.rho,(size_t) geometry_info.sigma,exception);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
if (*option == '+')
{
draw_info->stretch=UndefinedStretch;
break;
}
draw_info->stretch=(StretchType) ParseCommandOption(
MagickStretchOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
{
/*
Strip image of profiles and comments.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) StripImage(*image);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
ExceptionInfo
*sans;
if (*option == '+')
{
(void) QueryColorDatabase("none",&draw_info->stroke,exception);
if (draw_info->stroke_pattern != (Image *) NULL)
draw_info->stroke_pattern=DestroyImage(
draw_info->stroke_pattern);
break;
}
sans=AcquireExceptionInfo();
status=QueryColorDatabase(argv[i+1],&draw_info->stroke,sans);
sans=DestroyExceptionInfo(sans);
if (status == MagickFalse)
draw_info->stroke_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
draw_info->stroke_width=StringToDouble(argv[i+1],(char **) NULL);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
{
draw_info->style=UndefinedStyle;
break;
}
draw_info->style=(StyleType) ParseCommandOption(MagickStyleOptions,
MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
/*
Swirl image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseGeometry(argv[i+1],&geometry_info);
mogrify_image=SwirlImage(*image,geometry_info.rho,exception);
break;
}
break;
}
case 't':
{
if (LocaleCompare("threshold",option+1) == 0)
{
double
threshold;
/*
Threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
threshold=(double) QuantumRange/2;
else
threshold=StringToDoubleInterval(argv[i+1],(double) QuantumRange+
1.0);
(void) BilevelImageChannel(*image,channel,threshold);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
/*
Thumbnail image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) ParseRegionGeometry(*image,argv[i+1],&geometry,exception);
mogrify_image=ThumbnailImage(*image,geometry.width,geometry.height,
exception);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
{
if (draw_info->fill_pattern != (Image *) NULL)
draw_info->fill_pattern=DestroyImage(draw_info->fill_pattern);
break;
}
draw_info->fill_pattern=GetImageCache(mogrify_info,argv[i+1],
exception);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
/*
Tint the image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TintImage(*image,argv[i+1],draw_info->fill,exception);
break;
}
if (LocaleCompare("transform",option+1) == 0)
{
/*
Affine transform image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=AffineTransformImage(*image,&draw_info->affine,
exception);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
MagickPixelPacket
target;
(void) SyncImageSettings(mogrify_info,*image);
(void) QueryMagickColor(argv[i+1],&target,exception);
(void) TransparentPaintImage(*image,&target,(Quantum)
TransparentOpacity,*option == '-' ? MagickFalse : MagickTrue);
InheritException(exception,&(*image)->exception);
break;
}
if (LocaleCompare("transpose",option+1) == 0)
{
/*
Transpose image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TransposeImage(*image,exception);
break;
}
if (LocaleCompare("transverse",option+1) == 0)
{
/*
Transverse image scanlines.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TransverseImage(*image,exception);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
quantize_info->tree_depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
{
/*
Trim image.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=TrimImage(*image,exception);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
ImageType
type;
(void) SyncImageSettings(mogrify_info,*image);
if (*option == '+')
type=UndefinedType;
else
type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
argv[i+1]);
(*image)->type=UndefinedType;
(void) SetImageType(*image,type);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
(void) QueryColorDatabase(argv[i+1],&draw_info->undercolor,
exception);
break;
}
if (LocaleCompare("unique",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageArtifact(*image,"identify:unique-colors");
break;
}
(void) SetImageArtifact(*image,"identify:unique-colors","true");
(void) SetImageArtifact(*image,"verbose","true");
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
{
/*
Unique image colors.
*/
(void) SyncImageSettings(mogrify_info,*image);
mogrify_image=UniqueImageColors(*image,exception);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
/*
Unsharp mask image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=1.0;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.05;
mogrify_image=UnsharpMaskImageChannel(*image,channel,
geometry_info.rho,geometry_info.sigma,geometry_info.xi,
geometry_info.psi,exception);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
(void) SetImageArtifact(*image,option+1,
*option == '+' ? "false" : "true");
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
/*
Vignette image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
if ((flags & XiValue) == 0)
geometry_info.xi=0.1*(*image)->columns;
if ((flags & PsiValue) == 0)
geometry_info.psi=0.1*(*image)->rows;
if ((flags & PercentValue) != 0)
{
geometry_info.xi*=(double) (*image)->columns/100.0;
geometry_info.psi*=(double) (*image)->rows/100.0;
}
mogrify_image=VignetteImage(*image,geometry_info.rho,
geometry_info.sigma,(ssize_t) ceil(geometry_info.xi-0.5),(ssize_t)
ceil(geometry_info.psi-0.5),exception);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageVirtualPixelMethod(*image,
UndefinedVirtualPixelMethod);
break;
}
(void) SetImageVirtualPixelMethod(*image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]));
break;
}
break;
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
/*
Wave image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & SigmaValue) == 0)
geometry_info.sigma=1.0;
mogrify_image=WaveImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
/*
Wavelet denoise image.
*/
(void) SyncImageSettings(mogrify_info,*image);
flags=ParseGeometry(argv[i+1],&geometry_info);
if ((flags & PercentValue) != 0)
{
geometry_info.rho=QuantumRange*geometry_info.rho/100.0;
geometry_info.sigma=QuantumRange*geometry_info.sigma/100.0;
}
if ((flags & SigmaValue) == 0)
geometry_info.sigma=0.0;
mogrify_image=WaveletDenoiseImage(*image,geometry_info.rho,
geometry_info.sigma,exception);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
ssize_t
weight;
weight=ParseCommandOption(MagickWeightOptions,MagickFalse,
argv[i+1]);
if (weight == -1)
weight=StringToUnsignedLong(argv[i+1]);
draw_info->weight=(size_t) weight;
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
/*
White threshold image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) WhiteThresholdImageChannel(*image,channel,argv[i+1],
exception);
InheritException(exception,&(*image)->exception);
break;
}
break;
}
default:
break;
}
/*
Replace current image with any image that was generated.
*/
if (mogrify_image != (Image *) NULL)
ReplaceImageInListReturnLast(image,mogrify_image);
i+=count;
}
if (region_image != (Image *) NULL)
{
/*
Composite transformed region onto image.
*/
(void) SyncImageSettings(mogrify_info,*image);
(void) CompositeImage(region_image,region_image->matte != MagickFalse ?
CopyCompositeOp : OverCompositeOp,*image,region_geometry.x,
region_geometry.y);
      InheritException(exception,&region_image->exception);
*image=DestroyImage(*image);
*image=region_image;
region_image = (Image *) NULL;
}
/*
Free resources.
*/
quantize_info=DestroyQuantizeInfo(quantize_info);
draw_info=DestroyDrawInfo(draw_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status=(MagickStatusType) (exception->severity < ErrorException ? 1 : 0);
return(status == 0 ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e C o m m a n d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageCommand() transforms an image or a sequence of images. These
% transforms include image scaling, image rotation, color reduction, and
% others. The transmogrified image overwrites the original image.
%
% The format of the MogrifyImageCommand method is:
%
% MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,int argc,
% const char **argv,char **metadata,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: the number of elements in the argument vector.
%
% o argv: A text array containing the command line arguments.
%
% o metadata: any metadata is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
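/*
  Illustrative only (not part of the original source): a minimal sketch of
  driving MogrifyImageCommand() from a standalone program, assuming the
  public MagickWand entry points are available.

    #include <wand/MagickWand.h>

    int main(int argc,char **argv)
    {
      ExceptionInfo
        *exception;

      ImageInfo
        *image_info;

      MagickBooleanType
        status;

      MagickWandGenesis();
      image_info=AcquireImageInfo();
      exception=AcquireExceptionInfo();
      status=MogrifyImageCommand(image_info,argc,argv,(char **) NULL,
        exception);
      exception=DestroyExceptionInfo(exception);
      image_info=DestroyImageInfo(image_info);
      MagickWandTerminus();
      return(status != MagickFalse ? 0 : 1);
    }
*/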
static MagickBooleanType MogrifyUsage(void)
{
static const char
miscellaneous[] =
" -debug events display copious debugging information\n"
" -distribute-cache port\n"
" distributed pixel cache spanning one or more servers\n"
" -help print program options\n"
" -list type print a list of supported option arguments\n"
" -log format format of debugging information\n"
" -version print version information",
operators[] =
" -adaptive-blur geometry\n"
" adaptively blur pixels; decrease effect near edges\n"
" -adaptive-resize geometry\n"
" adaptively resize image using 'mesh' interpolation\n"
" -adaptive-sharpen geometry\n"
" adaptively sharpen pixels; increase effect near edges\n"
" -alpha option on, activate, off, deactivate, set, opaque, copy\n"
" transparent, extract, background, or shape\n"
" -annotate geometry text\n"
" annotate the image with text\n"
" -auto-gamma automagically adjust gamma level of image\n"
" -auto-level automagically adjust color levels of image\n"
" -auto-orient automagically orient (rotate) image\n"
" -bench iterations measure performance\n"
" -black-threshold value\n"
" force all pixels below the threshold into black\n"
" -blue-shift simulate a scene at nighttime in the moonlight\n"
" -blur geometry reduce image noise and reduce detail levels\n"
" -border geometry surround image with a border of color\n"
" -bordercolor color border color\n"
" -brightness-contrast geometry\n"
" improve brightness / contrast of the image\n"
" -canny geometry detect edges in the image\n"
" -cdl filename color correct with a color decision list\n"
" -charcoal radius simulate a charcoal drawing\n"
" -chop geometry remove pixels from the image interior\n"
" -clamp keep pixel values in range (0-QuantumRange)\n"
" -clip clip along the first path from the 8BIM profile\n"
" -clip-mask filename associate a clip mask with the image\n"
" -clip-path id clip along a named path from the 8BIM profile\n"
" -colorize value colorize the image with the fill color\n"
" -color-matrix matrix apply color correction to the image\n"
" -connected-components connectivity\n"
" connected-components uniquely labeled\n"
" -contrast enhance or reduce the image contrast\n"
" -contrast-stretch geometry\n"
" improve contrast by `stretching' the intensity range\n"
" -convolve coefficients\n"
" apply a convolution kernel to the image\n"
" -cycle amount cycle the image colormap\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -deskew threshold straighten an image\n"
" -despeckle reduce the speckles within an image\n"
" -distort method args\n"
" distort images according to given method ad args\n"
" -draw string annotate the image with a graphic primitive\n"
" -edge radius apply a filter to detect edges in the image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -emboss radius emboss an image\n"
" -enhance apply a digital filter to enhance a noisy image\n"
" -equalize perform histogram equalization to an image\n"
" -evaluate operator value\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -extent geometry set the image size\n"
" -extract geometry extract area from image\n"
" -hough-lines geometry\n"
" identify lines in the image\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fft implements the discrete Fourier transform (DFT)\n"
" -flip flip image vertically\n"
" -floodfill geometry color\n"
" floodfill the image with color\n"
" -flop flop image horizontally\n"
" -frame geometry surround image with an ornamental border\n"
" -function name parameters\n"
" apply function over image values\n"
" -gamma value level of gamma correction\n"
" -gaussian-blur geometry\n"
" reduce image noise and reduce detail levels\n"
" -geometry geometry preferred size or location of the image\n"
" -grayscale method convert image to grayscale\n"
" -help print program options\n"
" -identify identify the format and characteristics of the image\n"
" -ift implements the inverse discrete Fourier transform (DFT)\n"
" -implode amount implode image pixels about the center\n"
" -kuwahara geometry edge preserving noise reduction filter\n"
" -lat geometry local adaptive thresholding\n"
" -layers method optimize, merge, or compare image layers\n"
" -level value adjust the level of image contrast\n"
" -level-colors color,color\n"
" level image with the given colors\n"
" -linear-stretch geometry\n"
" improve contrast by `stretching with saturation'\n"
" -liquid-rescale geometry\n"
" rescale image with seam-carving\n"
" -local-contrast geometry\n"
" enhance local contrast\n"
" -magnify double the size of the image with pixel art scaling\n"
" -mean-shift geometry delineate arbitrarily shaped clusters in the image\n"
" -median geometry apply a median filter to the image\n"
" -mode geometry make each pixel the 'predominant color' of the\n"
" neighborhood\n"
" -modulate value vary the brightness, saturation, and hue\n"
" -monochrome transform image to black and white\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -motion-blur geometry\n"
" simulate motion blur\n"
" -negate replace every pixel with its complementary color \n"
" -noise geometry add or reduce noise in an image\n"
" -normalize transform image to span the full range of colors\n"
" -opaque color change this color to the fill color\n"
" -ordered-dither NxN\n"
" add a noise pattern to the image with specific\n"
" amplitudes\n"
" -paint radius simulate an oil painting\n"
" -perceptible epsilon\n"
" pixel value less than |epsilon| become epsilon or\n"
" -epsilon\n"
" -polaroid angle simulate a Polaroid picture\n"
" -posterize levels reduce the image to a limited number of color levels\n"
" -profile filename add, delete, or apply an image profile\n"
" -quantize colorspace reduce colors in this colorspace\n"
" -radial-blur angle radial blur the image\n"
" -raise value lighten/darken image edges to create a 3-D effect\n"
" -random-threshold low,high\n"
" random threshold the image\n"
" -region geometry apply options to a portion of the image\n"
" -render render vector graphics\n"
" -resample geometry change the resolution of an image\n"
" -resize geometry resize the image\n"
" -roll geometry roll an image vertically or horizontally\n"
" -rotate degrees apply Paeth rotation to the image\n"
" -sample geometry scale image with pixel sampling\n"
" -scale geometry scale the image\n"
" -segment values segment an image\n"
" -selective-blur geometry\n"
" selectively blur pixels within a contrast threshold\n"
" -sepia-tone threshold\n"
" simulate a sepia-toned photo\n"
" -set property value set an image property\n"
" -shade degrees shade the image using a distant light source\n"
" -shadow geometry simulate an image shadow\n"
" -sharpen geometry sharpen the image\n"
" -shave geometry shave pixels from the image edges\n"
" -shear geometry slide one edge of the image along the X or Y axis\n"
" -sigmoidal-contrast geometry\n"
" increase the contrast without saturating highlights or\n"
" shadows\n"
" -sketch geometry simulate a pencil sketch\n"
" -solarize threshold negate all pixels above the threshold level\n"
" -sparse-color method args\n"
" fill in a image based on a few color points\n"
" -splice geometry splice the background color into the image\n"
" -spread radius displace image pixels by a random amount\n"
" -statistic type radius\n"
" replace each pixel with corresponding statistic from the neighborhood\n"
" -strip strip image of all profiles and comments\n"
" -swirl degrees swirl image pixels about the center\n"
" -threshold value threshold the image\n"
" -thumbnail geometry create a thumbnail of the image\n"
" -tile filename tile image when filling a graphic primitive\n"
" -tint value tint the image with the fill color\n"
" -transform affine transform image\n"
" -transparent color make this color transparent within the image\n"
" -transpose flip image vertically and rotate 90 degrees\n"
" -transverse flop image horizontally and rotate 270 degrees\n"
" -trim trim image edges\n"
" -type type image type\n"
" -unique-colors discard all but one of any pixel color\n"
" -unsharp geometry sharpen the image\n"
" -vignette geometry soften the edges of the image in vignette style\n"
" -wave geometry alter an image along a sine wave\n"
" -wavelet-denoise threshold\n"
" removes noise from the image using a wavelet transform\n"
" -white-threshold value\n"
" force all pixels above the threshold into white",
sequence_operators[] =
" -affinity filename transform image colors to match this set of colors\n"
" -append append an image sequence\n"
" -clut apply a color lookup table to the image\n"
" -coalesce merge a sequence of images\n"
" -combine combine a sequence of images\n"
" -compare mathematically and visually annotate the difference between an image and its reconstruction\n"
" -complex operator perform complex mathematics on an image sequence\n"
" -composite composite image\n"
" -copy geometry offset\n"
" copy pixels from one area of an image to another\n"
" -crop geometry cut out a rectangular region of the image\n"
" -deconstruct break down an image sequence into constituent parts\n"
" -evaluate-sequence operator\n"
" evaluate an arithmetic, relational, or logical expression\n"
" -flatten flatten a sequence of images\n"
" -fx expression apply mathematical expression to an image channel(s)\n"
" -hald-clut apply a Hald color lookup table to the image\n"
" -layers method optimize, merge, or compare image layers\n"
" -morph value morph an image sequence\n"
" -mosaic create a mosaic from an image sequence\n"
" -poly terms build a polynomial from the image sequence and the corresponding\n"
" terms (coefficients and degree pairs).\n"
" -print string interpret string and print to console\n"
" -process arguments process the image with a custom image filter\n"
" -separate separate an image channel into a grayscale image\n"
" -smush geometry smush an image sequence together\n"
" -write filename write images to this file",
settings[] =
" -adjoin join images into a single multi-image file\n"
" -affine matrix affine transform matrix\n"
" -alpha option activate, deactivate, reset, or set the alpha channel\n"
" -antialias remove pixel-aliasing\n"
" -authenticate password\n"
" decipher image with this password\n"
" -attenuate value lessen (or intensify) when adding noise to an image\n"
" -background color background color\n"
" -bias value add bias when convolving an image\n"
" -black-point-compensation\n"
" use black point compensation\n"
" -blue-primary point chromaticity blue primary point\n"
" -bordercolor color border color\n"
" -caption string assign a caption to an image\n"
" -cdl filename color correct with a color decision list\n"
" -channel type apply option to select image channels\n"
" -colors value preferred number of colors in the image\n"
" -colorspace type alternate image colorspace\n"
" -comment string annotate image with comment\n"
" -compose operator set image composite operator\n"
" -compress type type of pixel compression when writing the image\n"
" -decipher filename convert cipher pixels to plain pixels\n"
" -define format:option\n"
" define one or more image format options\n"
" -delay value display the next image after pausing\n"
" -density geometry horizontal and vertical density of the image\n"
" -depth value image depth\n"
" -direction type render text right-to-left or left-to-right\n"
" -display server get image or font from this X server\n"
" -dispose method layer disposal method\n"
" -dither method apply error diffusion to image\n"
" -encipher filename convert plain pixels to cipher pixels\n"
" -encoding type text encoding type\n"
" -endian type endianness (MSB or LSB) of the image\n"
" -family name render text with this font family\n"
" -features distance analyze image features (e.g. contrast, correlation)\n"
" -fill color color to use when filling a graphic primitive\n"
" -filter type use this filter when resizing an image\n"
" -flatten flatten a sequence of images\n"
" -font name render text with this font\n"
" -format \"string\" output formatted image characteristics\n"
" -function name apply a function to the image\n"
" -fuzz distance colors within this distance are considered equal\n"
" -gravity type horizontal and vertical text placement\n"
" -green-primary point chromaticity green primary point\n"
" -intensity method method to generate intensity value from pixel\n"
" -intent type type of rendering intent when managing the image color\n"
" -interlace type type of image interlacing scheme\n"
" -interline-spacing value\n"
" set the space between two text lines\n"
" -interpolate method pixel color interpolation method\n"
" -interword-spacing value\n"
" set the space between two words\n"
" -kerning value set the space between two letters\n"
" -label string assign a label to an image\n"
" -limit type value pixel cache resource limit\n"
" -loop iterations add Netscape loop extension to your GIF animation\n"
" -mask filename associate a mask with the image\n"
" -matte store matte channel if the image has one\n"
" -mattecolor color frame color\n"
" -monitor monitor progress\n"
" -morphology method kernel\n"
" apply a morphology method to the image\n"
" -orient type image orientation\n"
" -page geometry size and location of an image canvas (setting)\n"
" -path path write images to this path on disk\n"
" -ping efficiently determine image attributes\n"
" -pointsize value font point size\n"
" -precision value maximum number of significant digits to print\n"
" -preview type image preview type\n"
" -quality value JPEG/MIFF/PNG compression level\n"
" -quiet suppress all warning messages\n"
" -red-primary point chromaticity red primary point\n"
" -regard-warnings pay attention to warning messages\n"
" -remap filename transform image colors to match this set of colors\n"
" -repage geometry size and location of an image canvas\n"
" -respect-parentheses settings remain in effect until parenthesis boundary\n"
" -sampling-factor geometry\n"
" horizontal and vertical sampling factor\n"
" -scene value image scene number\n"
" -seed value seed a new sequence of pseudo-random numbers\n"
" -size geometry width and height of image\n"
" -stretch type render text with this font stretch\n"
" -stroke color graphic primitive stroke color\n"
" -strokewidth value graphic primitive stroke width\n"
" -style type render text with this font style\n"
" -synchronize synchronize image to storage device\n"
" -taint declare the image as modified\n"
" -texture filename name of texture to tile onto the image background\n"
" -tile-offset geometry\n"
" tile offset\n"
" -treedepth value color tree depth\n"
" -transparent-color color\n"
" transparent color\n"
" -undercolor color annotation bounding box color\n"
" -units type the units of image resolution\n"
" -verbose print detailed information about the image\n"
" -view FlashPix viewing transforms\n"
" -virtual-pixel method\n"
" virtual pixel access method\n"
" -weight type render text with this font weight\n"
" -white-point point chromaticity white point",
stack_operators[] =
" -delete indexes delete the image from the image sequence\n"
" -duplicate count,indexes\n"
" duplicate an image one or more times\n"
" -insert index insert last image into the image sequence\n"
" -reverse reverse image sequence\n"
" -swap indexes swap two images in the image sequence";
ListMagickVersion(stdout);
(void) printf("Usage: %s [options ...] file [ [options ...] file ...]\n",
GetClientName());
(void) printf("\nImage Settings:\n");
(void) puts(settings);
(void) printf("\nImage Operators:\n");
(void) puts(operators);
(void) printf("\nImage Sequence Operators:\n");
(void) puts(sequence_operators);
(void) printf("\nImage Stack Operators:\n");
(void) puts(stack_operators);
(void) printf("\nMiscellaneous Options:\n");
(void) puts(miscellaneous);
(void) printf(
"\nBy default, the image format of `file' is determined by its magic\n");
(void) printf(
"number. To specify a particular image format, precede the filename\n");
(void) printf(
"with an image format name and a colon (i.e. ps:image) or specify the\n");
(void) printf(
"image type as the filename suffix (i.e. image.ps). Specify 'file' as\n");
(void) printf("'-' for standard input or output.\n");
return(MagickFalse);
}
WandExport MagickBooleanType MogrifyImageCommand(ImageInfo *image_info,
int argc,char **argv,char **wand_unused(metadata),ExceptionInfo *exception)
{
#define DestroyMogrify() \
{ \
if (format != (char *) NULL) \
format=DestroyString(format); \
if (path != (char *) NULL) \
path=DestroyString(path); \
DestroyImageStack(); \
for (i=0; i < (ssize_t) argc; i++) \
argv[i]=DestroyString(argv[i]); \
argv=(char **) RelinquishMagickMemory(argv); \
}
#define ThrowMogrifyException(asperity,tag,option) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),asperity,tag,"`%s'", \
option); \
DestroyMogrify(); \
return(MagickFalse); \
}
#define ThrowMogrifyInvalidArgumentException(option,argument) \
{ \
(void) ThrowMagickException(exception,GetMagickModule(),OptionError, \
"InvalidArgument","`%s': %s",argument,option); \
DestroyMogrify(); \
return(MagickFalse); \
}
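  /*
    Note: the Throw* macros above expand to DestroyMogrify() followed by a
    return, so every early exit below releases the format string, the path,
    the image stack, and the expanded argument vector.
  */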
char
*format,
*option,
*path;
Image
*image;
ImageStack
image_stack[MaxImageStackDepth+1];
MagickBooleanType
global_colormap;
MagickBooleanType
fire,
pend,
respect_parenthesis;
MagickStatusType
status;
register ssize_t
i;
ssize_t
j,
k;
wand_unreferenced(metadata);
/*
Set defaults.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(exception != (ExceptionInfo *) NULL);
if (argc == 2)
{
option=argv[1];
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
return(MagickTrue);
}
}
if (argc < 2)
return(MogrifyUsage());
format=(char *) NULL;
path=(char *) NULL;
global_colormap=MagickFalse;
k=0;
j=1;
NewImageStack();
option=(char *) NULL;
pend=MagickFalse;
respect_parenthesis=MagickFalse;
status=MagickTrue;
/*
Parse command line.
*/
ReadCommandlLine(argc,&argv);
status=ExpandFilenames(&argc,&argv);
if (status == MagickFalse)
ThrowMogrifyException(ResourceLimitError,"MemoryAllocationFailed",
GetExceptionMessage(errno));
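  /*
    Scan the command line: a parenthesis pushes or pops a frame on the image
    stack; a non-option token names an image file that is read, processed
    with the accumulated options, and written back in place; anything else
    is an option whose argument is validated by the switch below.
  */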
for (i=1; i < (ssize_t) argc; i++)
{
option=argv[i];
if (LocaleCompare(option,"(") == 0)
{
FireImageStack(MagickFalse,MagickTrue,pend);
if (k == MaxImageStackDepth)
ThrowMogrifyException(OptionError,"ParenthesisNestedTooDeeply",
option);
PushImageStack();
continue;
}
if (LocaleCompare(option,")") == 0)
{
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
if (k == 0)
ThrowMogrifyException(OptionError,"UnableToParseExpression",option);
PopImageStack();
continue;
}
if (IsCommandOption(option) == MagickFalse)
{
char
backup_filename[MaxTextExtent],
*filename;
Image
*images;
struct stat
properties;
/*
Option is a file name: begin by reading image from specified file.
*/
FireImageStack(MagickFalse,MagickFalse,pend);
filename=argv[i];
if ((LocaleCompare(filename,"--") == 0) && (i < (ssize_t) (argc-1)))
filename=argv[++i];
(void) SetImageOption(image_info,"filename",filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
images=ReadImages(image_info,exception);
status&=(images != (Image *) NULL) &&
(exception->severity < ErrorException);
if (images == (Image *) NULL)
continue;
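        /*
          Capture the file properties now; the access and modification times
          are restored after the write below when the "preserve-timestamp"
          option is set.
        */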
properties=(*GetBlobProperties(images));
if (format != (char *) NULL)
(void) CopyMagickString(images->filename,images->magick_filename,
MaxTextExtent);
if (path != (char *) NULL)
{
GetPathComponent(option,TailPath,filename);
(void) FormatLocaleString(images->filename,MaxTextExtent,"%s%c%s",
path,*DirectorySeparator,filename);
}
if (format != (char *) NULL)
AppendImageFormat(format,images->filename);
AppendImageStack(images);
FinalizeImageSettings(image_info,image,MagickFalse);
if (global_colormap != MagickFalse)
{
QuantizeInfo
*quantize_info;
quantize_info=AcquireQuantizeInfo(image_info);
(void) RemapImages(quantize_info,images,(Image *) NULL);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
*backup_filename='\0';
if ((LocaleCompare(image->filename,"-") != 0) &&
(IsPathWritable(image->filename) != MagickFalse))
{
register ssize_t
i;
            /*
              Rename the image file as a backup: append '~' (up to six
              times) until an unused filename is found; if none is found, or
              the rename fails, continue without a backup.
            */
(void) CopyMagickString(backup_filename,image->filename,
MaxTextExtent);
for (i=0; i < 6; i++)
{
(void) ConcatenateMagickString(backup_filename,"~",MaxTextExtent);
if (IsPathAccessible(backup_filename) == MagickFalse)
break;
}
if ((IsPathAccessible(backup_filename) != MagickFalse) ||
(rename_utf8(image->filename,backup_filename) != 0))
*backup_filename='\0';
}
/*
Write transmogrified image to disk.
*/
image_info->synchronize=MagickTrue;
status&=WriteImages(image_info,image,image->filename,exception);
if (status != MagickFalse)
{
#if defined(MAGICKCORE_HAVE_UTIME)
{
MagickBooleanType
preserve_timestamp;
preserve_timestamp=IsStringTrue(GetImageOption(image_info,
"preserve-timestamp"));
if (preserve_timestamp != MagickFalse)
{
struct utimbuf
timestamp;
timestamp.actime=properties.st_atime;
timestamp.modtime=properties.st_mtime;
                  (void) utime(image->filename,&timestamp);
}
}
#endif
if (*backup_filename != '\0')
(void) remove_utf8(backup_filename);
}
RemoveAllImageStack();
continue;
}
pend=image != (Image *) NULL ? MagickTrue : MagickFalse;
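    /*
      From here on, options are only validated; they take effect when the
      image stack is fired above, which hands the collected argument vector
      to MogrifyImage() for processing.
    */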
switch (*(option+1))
{
case 'a':
{
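        /*
          Most handlers below follow one idiom: advance i to the option's
          argument, throw "MissingArgument" if the command line ends, and
          throw "InvalidArgument" if the argument does not parse (e.g. as a
          geometry).
        */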
if (LocaleCompare("adaptive-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-resize",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("adaptive-sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("affine",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("alpha",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickAlphaOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedAlphaChannelType",
argv[i]);
break;
}
if (LocaleCompare("annotate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
i++;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
break;
if (LocaleCompare("append",option+1) == 0)
break;
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("auto-gamma",option+1) == 0)
break;
if (LocaleCompare("auto-level",option+1) == 0)
break;
if (LocaleCompare("auto-orient",option+1) == 0)
break;
if (LocaleCompare("average",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
break;
if (LocaleCompare("black-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blue-shift",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("border",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("brightness-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("canny",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
ssize_t
channel;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
channel=ParseChannelOption(argv[i]);
if (channel < 0)
ThrowMogrifyException(OptionError,"UnrecognizedChannelType",
argv[i]);
break;
}
if (LocaleCompare("cdl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("charcoal",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("chop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("clamp",option+1) == 0)
break;
if (LocaleCompare("clip",option+1) == 0)
break;
if (LocaleCompare("clip-mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("clut",option+1) == 0)
break;
if (LocaleCompare("coalesce",option+1) == 0)
break;
if (LocaleCompare("colorize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("color-matrix",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
if (*option == '-')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("compare",option+1) == 0)
break;
if (LocaleCompare("complex",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickComplexOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedComplexOperator",
argv[i]);
break;
}
if (LocaleCompare("composite",option+1) == 0)
break;
if (LocaleCompare("compress",option+1) == 0)
{
ssize_t
compress;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
compress=ParseCommandOption(MagickCompressOptions,MagickFalse,
argv[i]);
if (compress < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageCompression",
argv[i]);
break;
}
if (LocaleCompare("concurrent",option+1) == 0)
break;
if (LocaleCompare("connected-components",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("contrast",option+1) == 0)
break;
if (LocaleCompare("contrast-stretch",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("convolve",option+1) == 0)
{
KernelInfo
*kernel_info;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("crop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("cycle",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'd':
{
if (LocaleCompare("decipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("deconstruct",option+1) == 0)
break;
if (LocaleCompare("debug",option+1) == 0)
{
ssize_t
event;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
event=ParseCommandOption(MagickLogEventOptions,MagickFalse,argv[i]);
if (event < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEventType",
argv[i]);
(void) SetLogEventMask(argv[i]);
break;
}
if (LocaleCompare("define",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
const char
*define;
define=GetImageOption(image_info,argv[i]);
if (define == (const char *) NULL)
ThrowMogrifyException(OptionError,"NoSuchOption",argv[i]);
break;
}
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("deskew",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("despeckle",option+1) == 0)
break;
if (LocaleCompare("dft",option+1) == 0)
break;
if (LocaleCompare("direction",option+1) == 0)
{
ssize_t
direction;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
direction=ParseCommandOption(MagickDirectionOptions,MagickFalse,
argv[i]);
if (direction < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDirectionType",
argv[i]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
ssize_t
dispose;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
dispose=ParseCommandOption(MagickDisposeOptions,MagickFalse,
argv[i]);
if (dispose < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDisposeMethod",
argv[i]);
break;
}
if (LocaleCompare("distort",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickDistortOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDistortMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickDitherOptions,MagickFalse,argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedDitherMethod",
argv[i]);
break;
}
if (LocaleCompare("draw",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("duration",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'e':
{
if (LocaleCompare("edge",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("emboss",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("encipher",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
ssize_t
endian;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
endian=ParseCommandOption(MagickEndianOptions,MagickFalse,argv[i]);
if (endian < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEndianType",
argv[i]);
break;
}
if (LocaleCompare("enhance",option+1) == 0)
break;
if (LocaleCompare("equalize",option+1) == 0)
break;
if (LocaleCompare("evaluate",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickEvaluateOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedEvaluateOperator",
argv[i]);
break;
}
if (LocaleCompare("extent",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("features",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
ssize_t
filter;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
filter=ParseCommandOption(MagickFilterOptions,MagickFalse,argv[i]);
if (filter < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageFilter",
argv[i]);
break;
}
if (LocaleCompare("flatten",option+1) == 0)
break;
if (LocaleCompare("flip",option+1) == 0)
break;
if (LocaleCompare("flop",option+1) == 0)
break;
if (LocaleCompare("floodfill",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
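            /*
              Overwrite the option name (with the placeholder "sans") so
              later scans do not reprocess -format, then verify that the
              requested format maps to a recognized image magick.
            */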
(void) CopyMagickString(argv[i]+1,"sans",MaxTextExtent);
(void) CloneString(&format,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&format,argv[i]);
(void) CopyMagickString(image_info->filename,format,MaxTextExtent);
(void) ConcatenateMagickString(image_info->filename,":",
MaxTextExtent);
(void) SetImageInfo(image_info,0,exception);
if (*image_info->magick == '\0')
ThrowMogrifyException(OptionError,"UnrecognizedImageFormat",
format);
break;
}
if (LocaleCompare("frame",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("function",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickFunctionOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedFunction",argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'g':
{
if (LocaleCompare("gamma",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("gaussian-blur",option+1) == 0) ||
(LocaleCompare("gaussian",option+1) == 0))
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("geometry",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("gravity",option+1) == 0)
{
ssize_t
gravity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
gravity=ParseCommandOption(MagickGravityOptions,MagickFalse,
argv[i]);
if (gravity < 0)
ThrowMogrifyException(OptionError,"UnrecognizedGravityType",
argv[i]);
break;
}
if (LocaleCompare("grayscale",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickPixelIntensityOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntensityMethod",
argv[i]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
break;
if (LocaleCompare("hough-lines",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if ((LocaleCompare("help",option+1) == 0) ||
(LocaleCompare("-help",option+1) == 0))
return(MogrifyUsage());
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'i':
{
if (LocaleCompare("identify",option+1) == 0)
break;
if (LocaleCompare("idft",option+1) == 0)
break;
if (LocaleCompare("implode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("intensity",option+1) == 0)
{
ssize_t
intensity;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intensity=ParseCommandOption(MagickPixelIntensityOptions,
MagickFalse,argv[i]);
if (intensity < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedPixelIntensityMethod",argv[i]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
ssize_t
intent;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
intent=ParseCommandOption(MagickIntentOptions,MagickFalse,argv[i]);
if (intent < 0)
ThrowMogrifyException(OptionError,"UnrecognizedIntentType",
argv[i]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
ssize_t
interlace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interlace=ParseCommandOption(MagickInterlaceOptions,MagickFalse,
argv[i]);
if (interlace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterlaceType",
argv[i]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
ssize_t
interpolate;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
interpolate=ParseCommandOption(MagickInterpolateOptions,MagickFalse,
argv[i]);
if (interpolate < 0)
ThrowMogrifyException(OptionError,"UnrecognizedInterpolateMethod",
argv[i]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("kuwahara",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("lat",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
            break;
          }
if (LocaleCompare("layers",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickLayerOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedLayerMethod",
argv[i]);
break;
}
if (LocaleCompare("level",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("level-colors",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("linewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
char
*p;
double
value;
ssize_t
resource;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
resource=ParseCommandOption(MagickResourceOptions,MagickFalse,
argv[i]);
if (resource < 0)
ThrowMogrifyException(OptionError,"UnrecognizedResourceType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
value=StringToDouble(argv[i],&p);
(void) value;
if ((p == argv[i]) && (LocaleCompare("unlimited",argv[i]) != 0))
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("liquid-rescale",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i]);
if (list < 0)
ThrowMogrifyException(OptionError,"UnrecognizedListType",argv[i]);
status=MogrifyImageInfo(image_info,(int) (i-j+1),(const char **)
argv+j,exception);
return(status == 0 ? MagickFalse : MagickTrue);
}
if (LocaleCompare("local-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
i++;
if ((i == (ssize_t) argc) ||
(strchr(argv[i],'%') == (char *) NULL))
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'm':
{
if (LocaleCompare("magnify",option+1) == 0)
break;
if (LocaleCompare("map",option+1) == 0)
{
global_colormap=(*option == '+') ? MagickTrue : MagickFalse;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("mask",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("matte",option+1) == 0)
break;
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickMetricOptions,MagickTrue,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMetricType",
argv[i]);
break;
}
if (LocaleCompare("maximum",option+1) == 0)
break;
if (LocaleCompare("mean-shift",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("median",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("minimum",option+1) == 0)
break;
if (LocaleCompare("modulate",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("mode",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
break;
if (LocaleCompare("monochrome",option+1) == 0)
break;
if (LocaleCompare("morph",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("morphology",option+1) == 0)
{
char
token[MaxTextExtent];
KernelInfo
*kernel_info;
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
GetNextToken(argv[i],(const char **) NULL,MaxTextExtent,token);
op=ParseCommandOption(MagickMorphologyOptions,MagickFalse,token);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedMorphologyMethod",
token);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
kernel_info=AcquireKernelInfo(argv[i]);
if (kernel_info == (KernelInfo *) NULL)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
kernel_info=DestroyKernelInfo(kernel_info);
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
break;
if (LocaleCompare("motion-blur",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'n':
{
if (LocaleCompare("negate",option+1) == 0)
break;
if (LocaleCompare("noise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
{
ssize_t
noise;
noise=ParseCommandOption(MagickNoiseOptions,MagickFalse,argv[i]);
if (noise < 0)
ThrowMogrifyException(OptionError,"UnrecognizedNoiseType",
argv[i]);
break;
}
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("noop",option+1) == 0)
break;
if (LocaleCompare("normalize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'o':
{
if (LocaleCompare("opaque",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("ordered-dither",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("orient",option+1) == 0)
{
ssize_t
orientation;
orientation=UndefinedOrientation;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
orientation=ParseCommandOption(MagickOrientationOptions,MagickFalse,
argv[i]);
if (orientation < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageOrientation",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("paint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("path",option+1) == 0)
{
(void) CloneString(&path,(char *) NULL);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
(void) CloneString(&path,argv[i]);
break;
}
if (LocaleCompare("perceptible",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("polaroid",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("poly",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("posterize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("print",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("profile",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("quantize",option+1) == 0)
{
ssize_t
colorspace;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,
argv[i]);
if (colorspace < 0)
ThrowMogrifyException(OptionError,"UnrecognizedColorspace",
argv[i]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'r':
{
if (LocaleCompare("radial-blur",option+1) == 0 ||
LocaleCompare("rotational-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("raise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("random-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("recolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
            break;
          }
if (LocaleCompare("regard-warnings",option+1) == 0)
break;
if (LocaleCompare("region",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("remap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("render",option+1) == 0)
break;
if (LocaleCompare("repage",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("resize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleNCompare("respect-parentheses",option+1,17) == 0)
{
respect_parenthesis=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("reverse",option+1) == 0)
break;
if (LocaleCompare("roll",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("rotate",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 's':
{
if (LocaleCompare("sample",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sampling-factor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scale",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("segment",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("selective-blur",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("separate",option+1) == 0)
break;
if (LocaleCompare("sepia-tone",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("set",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("shade",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shadow",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sharpen",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shave",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("shear",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sigmoidal-contrast",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sketch",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("smush",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
i++;
break;
}
if (LocaleCompare("solarize",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("sparse-color",option+1) == 0)
{
ssize_t
op;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickSparseColorOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedSparseColorMethod",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("splice",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("spread",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("statistic",option+1) == 0)
{
ssize_t
op;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
op=ParseCommandOption(MagickStatisticOptions,MagickFalse,argv[i]);
if (op < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStatisticType",
argv[i]);
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("stretch",option+1) == 0)
{
ssize_t
stretch;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
            stretch=ParseCommandOption(MagickStretchOptions,MagickFalse,
              argv[i]);
if (stretch < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("strip",option+1) == 0)
break;
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
ssize_t
style;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
style=ParseCommandOption(MagickStyleOptions,MagickFalse,argv[i]);
if (style < 0)
ThrowMogrifyException(OptionError,"UnrecognizedStyleType",
argv[i]);
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("swirl",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
break;
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
break;
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("tint",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transform",option+1) == 0)
break;
if (LocaleCompare("transpose",option+1) == 0)
break;
if (LocaleCompare("transverse",option+1) == 0)
break;
if (LocaleCompare("threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("thumbnail",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("transparent",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("treedepth",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("trim",option+1) == 0)
break;
if (LocaleCompare("type",option+1) == 0)
{
ssize_t
type;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
type=ParseCommandOption(MagickTypeOptions,MagickFalse,argv[i]);
if (type < 0)
ThrowMogrifyException(OptionError,"UnrecognizedImageType",
argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("unique-colors",option+1) == 0)
break;
if (LocaleCompare("units",option+1) == 0)
{
ssize_t
units;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
units=ParseCommandOption(MagickResolutionOptions,MagickFalse,
argv[i]);
if (units < 0)
ThrowMogrifyException(OptionError,"UnrecognizedUnitsType",
argv[i]);
break;
}
if (LocaleCompare("unsharp",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
image_info->verbose=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if ((LocaleCompare("version",option+1) == 0) ||
(LocaleCompare("-version",option+1) == 0))
{
ListMagickVersion(stdout);
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("vignette",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
ssize_t
method;
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
method=ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i]);
if (method < 0)
ThrowMogrifyException(OptionError,
"UnrecognizedVirtualPixelMethod",argv[i]);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case 'w':
{
if (LocaleCompare("wave",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("wavelet-denoise",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("white-threshold",option+1) == 0)
{
if (*option == '+')
break;
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
if (IsGeometry(argv[i]) == MagickFalse)
ThrowMogrifyInvalidArgumentException(option,argv[i]);
break;
}
if (LocaleCompare("write",option+1) == 0)
{
i++;
if (i == (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingArgument",option);
break;
}
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
case '?':
break;
default:
ThrowMogrifyException(OptionError,"UnrecognizedOption",option)
}
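    /*
      Fire the pending image stack when this option carries FireOptionFlag.
    */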
fire=(GetCommandOptionFlags(MagickCommandOptions,MagickFalse,option) &
FireOptionFlag) == 0 ? MagickFalse : MagickTrue;
if (fire != MagickFalse)
FireImageStack(MagickFalse,MagickTrue,MagickTrue);
}
if (k != 0)
ThrowMogrifyException(OptionError,"UnbalancedParenthesis",argv[i]);
if (i != (ssize_t) argc)
ThrowMogrifyException(OptionError,"MissingAnImageFilename",argv[i]);
DestroyMogrify();
return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MogrifyImageInfo() applies image processing settings to the image info
%  as prescribed by command line options.
%
% The format of the MogrifyImageInfo method is:
%
% MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,const int argc,
% const char **argv,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o argc: Specifies the number of elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o exception: return any errors or warnings in this structure.
%
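%  A minimal usage sketch follows; the argument strings and the `settings'
%  name are illustrative only:
%
%    const char
%      *settings[] = { "-quality", "85", "-colorspace", "sRGB" };
%
%    (void) MogrifyImageInfo(image_info,4,settings,exception);
%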
*/
WandExport MagickBooleanType MogrifyImageInfo(ImageInfo *image_info,
const int argc,const char **argv,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
ssize_t
count;
register ssize_t
i;
/*
Initialize method variables.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (argc < 0)
return(MagickTrue);
/*
Set the image settings.
*/
for (i=0; i < (ssize_t) argc; i++)
{
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("adjoin",option+1) == 0)
{
image_info->adjoin=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("antialias",option+1) == 0)
{
image_info->antialias=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("attenuate",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("authenticate",option+1) == 0)
{
if (*option == '+')
(void) CloneString(&image_info->authenticate,(char *) NULL);
else
(void) CloneString(&image_info->authenticate,argv[i+1]);
break;
}
break;
}
case 'b':
{
if (LocaleCompare("background",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBackgroundColor,
&image_info->background_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->background_color,
exception);
break;
}
if (LocaleCompare("bias",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("black-point-compensation",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("blue-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("bordercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) QueryColorDatabase(MogrifyBorderColor,
&image_info->border_color,exception);
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->border_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("box",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,"undercolor","none");
break;
}
(void) SetImageOption(image_info,"undercolor",argv[i+1]);
break;
}
break;
}
case 'c':
{
if (LocaleCompare("cache",option+1) == 0)
{
MagickSizeType
limit;
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+1]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+1],100.0);
(void) SetMagickResourceLimit(MemoryResource,limit);
(void) SetMagickResourceLimit(MapResource,2*limit);
break;
}
if (LocaleCompare("caption",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
image_info->channel=DefaultChannels;
break;
}
image_info->channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("colors",option+1) == 0)
{
image_info->colors=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("colorspace",option+1) == 0)
{
if (*option == '+')
{
image_info->colorspace=UndefinedColorspace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->colorspace=(ColorspaceType) ParseCommandOption(
MagickColorspaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("comment",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("compress",option+1) == 0)
{
if (*option == '+')
{
image_info->compression=UndefinedCompression;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("debug",option+1) == 0)
{
if (*option == '+')
(void) SetLogEventMask("none");
else
(void) SetLogEventMask(argv[i+1]);
image_info->debug=IsEventLogging();
break;
}
if (LocaleCompare("define",option+1) == 0)
{
if (*option == '+')
{
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
(void) DeleteImageRegistry(argv[i+1]+9);
else
(void) DeleteImageOption(image_info,argv[i+1]);
break;
}
if (LocaleNCompare(argv[i+1],"registry:",9) == 0)
{
(void) DefineImageRegistry(StringRegistryType,argv[i+1]+9,
exception);
break;
}
(void) DefineImageOption(image_info,argv[i+1]);
break;
}
if (LocaleCompare("delay",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("density",option+1) == 0)
{
/*
Set image density.
*/
if (*option == '+')
{
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
(void) SetImageOption(image_info,option+1,"72");
break;
}
(void) CloneString(&image_info->density,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("depth",option+1) == 0)
{
if (*option == '+')
{
image_info->depth=MAGICKCORE_QUANTUM_DEPTH;
break;
}
image_info->depth=StringToUnsignedLong(argv[i+1]);
break;
}
if (LocaleCompare("direction",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("display",option+1) == 0)
{
if (*option == '+')
{
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
break;
}
(void) CloneString(&image_info->server_name,argv[i+1]);
break;
}
if (LocaleCompare("dispose",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
image_info->dither=MagickFalse;
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
image_info->dither=MagickTrue;
break;
}
break;
}
case 'e':
{
if (LocaleCompare("encoding",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("endian",option+1) == 0)
{
if (*option == '+')
{
image_info->endian=UndefinedEndian;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->endian=(EndianType) ParseCommandOption(
MagickEndianOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("extract",option+1) == 0)
{
/*
Set image extract geometry.
*/
if (*option == '+')
{
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
break;
}
(void) CloneString(&image_info->extract,argv[i+1]);
break;
}
break;
}
case 'f':
{
if (LocaleCompare("family",option+1) == 0)
{
if (*option != '+')
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fill",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("filter",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("font",option+1) == 0)
{
if (*option == '+')
{
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
break;
}
(void) CloneString(&image_info->font,argv[i+1]);
break;
}
if (LocaleCompare("format",option+1) == 0)
{
register const char
*q;
for (q=strchr(argv[i+1],'%'); q != (char *) NULL; q=strchr(q+1,'%'))
if (strchr("Agkrz@[#",*(q+1)) != (char *) NULL)
image_info->ping=MagickFalse;
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("fuzz",option+1) == 0)
{
if (*option == '+')
{
image_info->fuzz=0.0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->fuzz=StringToDoubleInterval(argv[i+1],(double)
QuantumRange+1.0);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'g':
{
if (LocaleCompare("gravity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("green-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'i':
{
if (LocaleCompare("intensity",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("intent",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interlace",option+1) == 0)
{
if (*option == '+')
{
image_info->interlace=UndefinedInterlace;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->interlace=(InterlaceType) ParseCommandOption(
MagickInterlaceOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interline-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interpolate",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("interword-spacing",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'k':
{
if (LocaleCompare("kerning",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("label",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("limit",option+1) == 0)
{
MagickSizeType
limit;
ResourceType
type;
if (*option == '+')
break;
type=(ResourceType) ParseCommandOption(MagickResourceOptions,
MagickFalse,argv[i+1]);
limit=MagickResourceInfinity;
if (LocaleCompare("unlimited",argv[i+2]) != 0)
limit=(MagickSizeType) SiPrefixToDoubleInterval(argv[i+2],100.0);
(void) SetMagickResourceLimit(type,limit);
break;
}
if (LocaleCompare("list",option+1) == 0)
{
ssize_t
list;
/*
Display configuration list.
*/
list=ParseCommandOption(MagickListOptions,MagickFalse,argv[i+1]);
switch (list)
{
case MagickCoderOptions:
{
(void) ListCoderInfo((FILE *) NULL,exception);
break;
}
case MagickColorOptions:
{
(void) ListColorInfo((FILE *) NULL,exception);
break;
}
case MagickConfigureOptions:
{
(void) ListConfigureInfo((FILE *) NULL,exception);
break;
}
case MagickDelegateOptions:
{
(void) ListDelegateInfo((FILE *) NULL,exception);
break;
}
case MagickFontOptions:
{
(void) ListTypeInfo((FILE *) NULL,exception);
break;
}
case MagickFormatOptions:
{
(void) ListMagickInfo((FILE *) NULL,exception);
break;
}
case MagickLocaleOptions:
{
(void) ListLocaleInfo((FILE *) NULL,exception);
break;
}
case MagickLogOptions:
{
(void) ListLogInfo((FILE *) NULL,exception);
break;
}
case MagickMagicOptions:
{
(void) ListMagicInfo((FILE *) NULL,exception);
break;
}
case MagickMimeOptions:
{
(void) ListMimeInfo((FILE *) NULL,exception);
break;
}
case MagickModuleOptions:
{
(void) ListModuleInfo((FILE *) NULL,exception);
break;
}
case MagickPolicyOptions:
{
(void) ListPolicyInfo((FILE *) NULL,exception);
break;
}
case MagickResourceOptions:
{
(void) ListMagickResourceInfo((FILE *) NULL,exception);
break;
}
case MagickThresholdOptions:
{
(void) ListThresholdMaps((FILE *) NULL,exception);
break;
}
default:
{
(void) ListCommandOptions((FILE *) NULL,(CommandOption) list,
exception);
break;
}
}
break;
}
if (LocaleCompare("log",option+1) == 0)
{
if (*option == '+')
break;
(void) SetLogFormat(argv[i+1]);
break;
}
if (LocaleCompare("loop",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'm':
{
if (LocaleCompare("matte",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("mattecolor",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(MogrifyMatteColor,
&image_info->matte_color,exception);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
(void) QueryColorDatabase(argv[i+1],&image_info->matte_color,
exception);
break;
}
if (LocaleCompare("metric",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("monitor",option+1) == 0)
{
(void) SetImageInfoProgressMonitor(image_info,MonitorProgress,
(void *) NULL);
break;
}
if (LocaleCompare("monochrome",option+1) == 0)
{
image_info->monochrome=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
break;
}
case 'o':
{
if (LocaleCompare("orient",option+1) == 0)
{
if (*option == '+')
{
image_info->orientation=UndefinedOrientation;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
        break;
      }
case 'p':
{
if (LocaleCompare("page",option+1) == 0)
{
char
*canonical_page,
page[MaxTextExtent];
const char
*image_option;
MagickStatusType
flags;
RectangleInfo
geometry;
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
(void) CloneString(&image_info->page,(char *) NULL);
break;
}
(void) memset(&geometry,0,sizeof(geometry));
image_option=GetImageOption(image_info,"page");
if (image_option != (const char *) NULL)
(void) ParseAbsoluteGeometry(image_option,&geometry);
canonical_page=GetPageGeometry(argv[i+1]);
flags=ParseAbsoluteGeometry(canonical_page,&geometry);
canonical_page=DestroyString(canonical_page);
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu",
(unsigned long) geometry.width,(unsigned long) geometry.height);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
(void) FormatLocaleString(page,MaxTextExtent,"%lux%lu%+ld%+ld",
(unsigned long) geometry.width,(unsigned long) geometry.height,
(long) geometry.x,(long) geometry.y);
(void) SetImageOption(image_info,option+1,page);
(void) CloneString(&image_info->page,page);
break;
}
if (LocaleCompare("pen",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("ping",option+1) == 0)
{
image_info->ping=(*option == '-') ? MagickTrue : MagickFalse;
break;
}
if (LocaleCompare("pointsize",option+1) == 0)
{
if (*option == '+')
geometry_info.rho=0.0;
else
(void) ParseGeometry(argv[i+1],&geometry_info);
image_info->pointsize=geometry_info.rho;
break;
}
if (LocaleCompare("precision",option+1) == 0)
{
(void) SetMagickPrecision(StringToInteger(argv[i+1]));
break;
}
if (LocaleCompare("preview",option+1) == 0)
{
/*
Preview image.
*/
if (*option == '+')
{
image_info->preview_type=UndefinedPreview;
break;
}
image_info->preview_type=(PreviewType) ParseCommandOption(
MagickPreviewOptions,MagickFalse,argv[i+1]);
break;
}
break;
}
case 'q':
{
if (LocaleCompare("quality",option+1) == 0)
{
/*
Set image compression quality.
*/
if (*option == '+')
{
image_info->quality=UndefinedCompressionQuality;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->quality=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("quiet",option+1) == 0)
{
static WarningHandler
warning_handler = (WarningHandler) NULL;
if (*option == '+')
{
/*
Restore error or warning messages.
*/
warning_handler=SetWarningHandler(warning_handler);
break;
}
/*
Suppress error or warning messages.
*/
warning_handler=SetWarningHandler((WarningHandler) NULL);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("red-primary",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 's':
{
if (LocaleCompare("sampling-factor",option+1) == 0)
{
/*
Set image sampling factor.
*/
if (*option == '+')
{
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
break;
}
(void) CloneString(&image_info->sampling_factor,argv[i+1]);
break;
}
if (LocaleCompare("scene",option+1) == 0)
{
/*
Set image scene.
*/
if (*option == '+')
{
image_info->scene=0;
(void) SetImageOption(image_info,option+1,"0");
break;
}
image_info->scene=StringToUnsignedLong(argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("seed",option+1) == 0)
{
unsigned long
seed;
if (*option == '+')
{
seed=(unsigned long) time((time_t *) NULL);
SetRandomSecretKey(seed);
break;
}
seed=StringToUnsignedLong(argv[i+1]);
SetRandomSecretKey(seed);
break;
}
if (LocaleCompare("size",option+1) == 0)
{
if (*option == '+')
{
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
break;
}
(void) CloneString(&image_info->size,argv[i+1]);
break;
}
if (LocaleCompare("stroke",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("strokewidth",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("style",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"none");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("synchronize",option+1) == 0)
{
if (*option == '+')
{
image_info->synchronize=MagickFalse;
break;
}
image_info->synchronize=MagickTrue;
break;
}
break;
}
case 't':
{
if (LocaleCompare("taint",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"false");
break;
}
(void) SetImageOption(image_info,option+1,"true");
break;
}
if (LocaleCompare("texture",option+1) == 0)
{
if (*option == '+')
{
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
break;
}
(void) CloneString(&image_info->texture,argv[i+1]);
break;
}
if (LocaleCompare("tile-offset",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("transparent-color",option+1) == 0)
{
if (*option == '+')
{
(void) QueryColorDatabase("none",&image_info->transparent_color, exception);
(void) SetImageOption(image_info,option+1,"none");
break;
}
(void) QueryColorDatabase(argv[i+1],&image_info->transparent_color,
exception);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("type",option+1) == 0)
{
if (*option == '+')
{
image_info->type=UndefinedType;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->type=(ImageType) ParseCommandOption(MagickTypeOptions,
MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'u':
{
if (LocaleCompare("undercolor",option+1) == 0)
{
if (*option == '+')
{
(void) DeleteImageOption(image_info,option+1);
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("units",option+1) == 0)
{
if (*option == '+')
{
image_info->units=UndefinedResolution;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->units=(ResolutionType) ParseCommandOption(
MagickResolutionOptions,MagickFalse,argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'v':
{
if (LocaleCompare("verbose",option+1) == 0)
{
if (*option == '+')
{
image_info->verbose=MagickFalse;
break;
}
image_info->verbose=MagickTrue;
image_info->ping=MagickFalse;
break;
}
if (LocaleCompare("view",option+1) == 0)
{
if (*option == '+')
{
if (image_info->view != (char *) NULL)
image_info->view=DestroyString(image_info->view);
break;
}
(void) CloneString(&image_info->view,argv[i+1]);
break;
}
if (LocaleCompare("virtual-pixel",option+1) == 0)
{
if (*option == '+')
{
image_info->virtual_pixel_method=UndefinedVirtualPixelMethod;
(void) SetImageOption(image_info,option+1,"undefined");
break;
}
image_info->virtual_pixel_method=(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,
argv[i+1]);
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("weight",option+1) == 0)
{
if (*option == '+')
(void) SetImageOption(image_info,option+1,"0");
else
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
if (LocaleCompare("white-point",option+1) == 0)
{
if (*option == '+')
{
(void) SetImageOption(image_info,option+1,"0.0");
break;
}
(void) SetImageOption(image_info,option+1,argv[i+1]);
break;
}
break;
}
default:
break;
}
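    /*
      Skip the arguments consumed by this option; the loop increment then
      advances past the option itself.
    */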
i+=count;
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e L i s t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImageList() applies any command line options that might affect the
% entire image list (e.g. -append, -coalesce, etc.).
%
% The format of the MogrifyImageList method is:
%
% MagickBooleanType MogrifyImageList(ImageInfo *image_info,const int argc,
% const char **argv,Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o images: pointer to a pointer to the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MogrifyImageList(ImageInfo *image_info,
const int argc,const char **argv,Image **images,ExceptionInfo *exception)
{
ChannelType
channel;
const char
*option;
ImageInfo
*mogrify_info;
MagickStatusType
status;
QuantizeInfo
*quantize_info;
register ssize_t
i;
ssize_t
count,
index;
/*
Apply options to the image list.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image **) NULL);
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
mogrify_info=CloneImageInfo(image_info);
quantize_info=AcquireQuantizeInfo(mogrify_info);
channel=mogrify_info->channel;
status=MagickTrue;
for (i=0; i < (ssize_t) argc; i++)
{
if (*images == (Image *) NULL)
break;
option=argv[i];
if (IsCommandOption(option) == MagickFalse)
continue;
count=ParseCommandOption(MagickCommandOptions,MagickFalse,option);
count=MagickMax(count,0L);
if ((i+count) >= (ssize_t) argc)
break;
status=MogrifyImageInfo(mogrify_info,(int) count+1,argv+i,exception);
switch (*(option+1))
{
case 'a':
{
if (LocaleCompare("affinity",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("append",option+1) == 0)
{
Image
*append_image;
(void) SyncImagesSettings(mogrify_info,*images);
append_image=AppendImages(*images,*option == '-' ? MagickTrue :
MagickFalse,exception);
if (append_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=append_image;
break;
}
if (LocaleCompare("average",option+1) == 0)
{
Image
*average_image;
/*
Average an image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
average_image=EvaluateImages(*images,MeanEvaluateOperator,
exception);
if (average_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=average_image;
break;
}
break;
}
case 'c':
{
if (LocaleCompare("channel",option+1) == 0)
{
if (*option == '+')
{
channel=DefaultChannels;
break;
}
channel=(ChannelType) ParseChannelOption(argv[i+1]);
break;
}
if (LocaleCompare("clut",option+1) == 0)
{
Image
*clut_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
clut_image=RemoveFirstImageFromList(images);
if (clut_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
image=DestroyImage(image);
status=MagickFalse;
break;
}
(void) ClutImageChannel(image,channel,clut_image);
clut_image=DestroyImage(clut_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("coalesce",option+1) == 0)
{
Image
*coalesce_image;
(void) SyncImagesSettings(mogrify_info,*images);
coalesce_image=CoalesceImages(*images,exception);
if (coalesce_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=coalesce_image;
break;
}
if (LocaleCompare("combine",option+1) == 0)
{
Image
*combine_image;
(void) SyncImagesSettings(mogrify_info,*images);
combine_image=CombineImages(*images,channel,exception);
if (combine_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=combine_image;
break;
}
if (LocaleCompare("compare",option+1) == 0)
{
double
distortion;
Image
*difference_image,
*image,
*reconstruct_image;
MetricType
metric;
/*
Mathematically and visually annotate the difference between an
image and its reconstruction.
*/
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
reconstruct_image=RemoveFirstImageFromList(images);
if (reconstruct_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
image=DestroyImage(image);
status=MagickFalse;
break;
}
metric=UndefinedMetric;
option=GetImageOption(image_info,"metric");
if (option != (const char *) NULL)
metric=(MetricType) ParseCommandOption(MagickMetricOptions,
MagickFalse,option);
difference_image=CompareImageChannels(image,reconstruct_image,
channel,metric,&distortion,exception);
if (difference_image == (Image *) NULL)
break;
reconstruct_image=DestroyImage(reconstruct_image);
image=DestroyImage(image);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=difference_image;
break;
}
if (LocaleCompare("complex",option+1) == 0)
{
ComplexOperator
op;
Image
*complex_images;
(void) SyncImageSettings(mogrify_info,*images);
op=(ComplexOperator) ParseCommandOption(MagickComplexOptions,
MagickFalse,argv[i+1]);
complex_images=ComplexImages(*images,op,exception);
if (complex_images == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=complex_images;
break;
}
if (LocaleCompare("composite",option+1) == 0)
{
Image
*mask_image,
*composite_image,
*image;
RectangleInfo
geometry;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
composite_image=RemoveFirstImageFromList(images);
if (composite_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
image=DestroyImage(image);
status=MagickFalse;
break;
}
(void) TransformImage(&composite_image,(char *) NULL,
composite_image->geometry);
SetGeometry(composite_image,&geometry);
(void) ParseAbsoluteGeometry(composite_image->geometry,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,
&geometry);
mask_image=RemoveFirstImageFromList(images);
if (mask_image != (Image *) NULL)
{
if ((image->compose == DisplaceCompositeOp) ||
(image->compose == DistortCompositeOp))
{
/*
Merge Y displacement into X displacement image.
*/
(void) CompositeImage(composite_image,CopyGreenCompositeOp,
mask_image,0,0);
mask_image=DestroyImage(mask_image);
}
else
{
/*
Set a blending mask for the composition.
*/
if (image->mask != (Image *) NULL)
image->mask=DestroyImage(image->mask);
image->mask=mask_image;
(void) NegateImage(image->mask,MagickFalse);
}
}
(void) CompositeImageChannel(image,channel,image->compose,
composite_image,geometry.x,geometry.y);
if (mask_image != (Image *) NULL)
{
image->mask=DestroyImage(image->mask);
mask_image=image->mask;
}
composite_image=DestroyImage(composite_image);
InheritException(exception,&image->exception);
*images=DestroyImageList(*images);
*images=image;
break;
}
if (LocaleCompare("copy",option+1) == 0)
{
Image
*source_image;
OffsetInfo
offset;
RectangleInfo
geometry;
/*
Copy image pixels.
*/
(void) SyncImageSettings(mogrify_info,*images);
(void) ParsePageGeometry(*images,argv[i+2],&geometry,exception);
offset.x=geometry.x;
offset.y=geometry.y;
source_image=(*images);
if (source_image->next != (Image *) NULL)
source_image=source_image->next;
(void) ParsePageGeometry(source_image,argv[i+1],&geometry,
exception);
status=CopyImagePixels(*images,source_image,&geometry,&offset,
exception);
break;
}
break;
}
case 'd':
{
if (LocaleCompare("deconstruct",option+1) == 0)
{
Image
*deconstruct_image;
(void) SyncImagesSettings(mogrify_info,*images);
deconstruct_image=DeconstructImages(*images,exception);
if (deconstruct_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=deconstruct_image;
break;
}
if (LocaleCompare("delete",option+1) == 0)
{
if (*option == '+')
DeleteImages(images,"-1",exception);
else
DeleteImages(images,argv[i+1],exception);
break;
}
if (LocaleCompare("dither",option+1) == 0)
{
if (*option == '+')
{
quantize_info->dither=MagickFalse;
break;
}
quantize_info->dither=MagickTrue;
quantize_info->dither_method=(DitherMethod) ParseCommandOption(
MagickDitherOptions,MagickFalse,argv[i+1]);
break;
}
if (LocaleCompare("duplicate",option+1) == 0)
{
Image
*duplicate_images;
if (*option == '+')
duplicate_images=DuplicateImages(*images,1,"-1",exception);
else
{
const char
*p;
size_t
number_duplicates;
number_duplicates=(size_t) StringToLong(argv[i+1]);
p=strchr(argv[i+1],',');
if (p == (const char *) NULL)
duplicate_images=DuplicateImages(*images,number_duplicates,
"-1",exception);
else
duplicate_images=DuplicateImages(*images,number_duplicates,p,
exception);
}
AppendImageToList(images,duplicate_images);
(void) SyncImagesSettings(mogrify_info,*images);
break;
}
break;
}
case 'e':
{
if (LocaleCompare("evaluate-sequence",option+1) == 0)
{
Image
*evaluate_image;
MagickEvaluateOperator
op;
(void) SyncImageSettings(mogrify_info,*images);
op=(MagickEvaluateOperator) ParseCommandOption(
MagickEvaluateOptions,MagickFalse,argv[i+1]);
evaluate_image=EvaluateImages(*images,op,exception);
if (evaluate_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=evaluate_image;
break;
}
break;
}
case 'f':
{
if (LocaleCompare("fft",option+1) == 0)
{
Image
*fourier_image;
/*
Implements the forward discrete Fourier transform (DFT).
*/
(void) SyncImageSettings(mogrify_info,*images);
fourier_image=ForwardFourierTransformImage(*images,*option == '-' ?
MagickTrue : MagickFalse,exception);
if (fourier_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("flatten",option+1) == 0)
{
Image
*flatten_image;
(void) SyncImagesSettings(mogrify_info,*images);
flatten_image=MergeImageLayers(*images,FlattenLayer,exception);
if (flatten_image == (Image *) NULL)
break;
*images=DestroyImageList(*images);
*images=flatten_image;
break;
}
if (LocaleCompare("fx",option+1) == 0)
{
Image
*fx_image;
(void) SyncImagesSettings(mogrify_info,*images);
fx_image=FxImageChannel(*images,channel,argv[i+1],exception);
if (fx_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=fx_image;
break;
}
break;
}
case 'h':
{
if (LocaleCompare("hald-clut",option+1) == 0)
{
Image
*hald_image,
*image;
(void) SyncImagesSettings(mogrify_info,*images);
image=RemoveFirstImageFromList(images);
hald_image=RemoveFirstImageFromList(images);
if (hald_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
image=DestroyImage(image);
status=MagickFalse;
break;
}
(void) HaldClutImageChannel(image,channel,hald_image);
hald_image=DestroyImage(hald_image);
InheritException(exception,&image->exception);
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=image;
break;
}
break;
}
case 'i':
{
if (LocaleCompare("ift",option+1) == 0)
{
Image
*fourier_image,
*magnitude_image,
*phase_image;
/*
Implements the inverse discrete Fourier transform (DFT).
*/
(void) SyncImagesSettings(mogrify_info,*images);
magnitude_image=RemoveFirstImageFromList(images);
phase_image=RemoveFirstImageFromList(images);
if (phase_image == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"ImageSequenceRequired","`%s'",option);
magnitude_image=DestroyImage(magnitude_image);
status=MagickFalse;
break;
}
fourier_image=InverseFourierTransformImage(magnitude_image,
phase_image,*option == '-' ? MagickTrue : MagickFalse,exception);
magnitude_image=DestroyImage(magnitude_image);
phase_image=DestroyImage(phase_image);
if (fourier_image == (Image *) NULL)
break;
if (*images != (Image *) NULL)
*images=DestroyImageList(*images);
*images=fourier_image;
break;
}
if (LocaleCompare("insert",option+1) == 0)
{
Image
*p,
*q;
index=0;
if (*option != '+')
index=(ssize_t) StringToLong(argv[i+1]);
p=RemoveLastImageFromList(images);
if (p == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
q=p;
if (index == 0)
PrependImageToList(images,q);
else
if (index == (ssize_t) GetImageListLength(*images))
AppendImageToList(images,q);
else
{
q=GetImageFromList(*images,index-1);
if (q == (Image *) NULL)
{
p=DestroyImage(p);
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",argv[i+1]);
status=MagickFalse;
break;
}
InsertImageInList(&q,p);
}
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'l':
{
if (LocaleCompare("layers",option+1) == 0)
{
Image
*layers;
ImageLayerMethod
method;
(void) SyncImagesSettings(mogrify_info,*images);
layers=(Image *) NULL;
method=(ImageLayerMethod) ParseCommandOption(MagickLayerOptions,
MagickFalse,argv[i+1]);
switch (method)
{
case CoalesceLayer:
{
layers=CoalesceImages(*images,exception);
break;
}
case CompareAnyLayer:
case CompareClearLayer:
case CompareOverlayLayer:
default:
{
layers=CompareImageLayers(*images,method,exception);
break;
}
case MergeLayer:
case FlattenLayer:
case MosaicLayer:
case TrimBoundsLayer:
{
layers=MergeImageLayers(*images,method,exception);
break;
}
case DisposeLayer:
{
layers=DisposeImages(*images,exception);
break;
}
case OptimizeImageLayer:
{
layers=OptimizeImageLayers(*images,exception);
break;
}
case OptimizePlusLayer:
{
layers=OptimizePlusImageLayers(*images,exception);
break;
}
case OptimizeTransLayer:
{
OptimizeImageTransparency(*images,exception);
break;
}
case RemoveDupsLayer:
{
RemoveDuplicateLayers(images,exception);
break;
}
case RemoveZeroLayer:
{
RemoveZeroDelayLayers(images,exception);
break;
}
case OptimizeLayer:
{
/*
General purpose GIF animation optimizer.
*/
layers=CoalesceImages(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=OptimizeImageLayers(*images,exception);
if (layers == (Image *) NULL)
{
status=MagickFalse;
break;
}
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
layers=(Image *) NULL;
OptimizeImageTransparency(*images,exception);
InheritException(exception,&(*images)->exception);
(void) RemapImages(quantize_info,*images,(Image *) NULL);
break;
}
case CompositeLayer:
{
CompositeOperator
compose;
Image
*source;
RectangleInfo
geometry;
/*
Split image sequence at the first 'NULL:' image.
*/
source=(*images);
while (source != (Image *) NULL)
{
source=GetNextImageInList(source);
if ((source != (Image *) NULL) &&
(LocaleCompare(source->magick,"NULL") == 0))
break;
}
if (source != (Image *) NULL)
{
if ((GetPreviousImageInList(source) == (Image *) NULL) ||
(GetNextImageInList(source) == (Image *) NULL))
source=(Image *) NULL;
else
{
/*
Separate the two lists, junk the null: image.
*/
source=SplitImageList(source->previous);
DeleteImageFromList(&source);
}
}
if (source == (Image *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"MissingNullSeparator","layers Composite");
status=MagickFalse;
break;
}
/*
Adjust offset with gravity and virtual canvas.
*/
SetGeometry(*images,&geometry);
(void) ParseAbsoluteGeometry((*images)->geometry,&geometry);
geometry.width=source->page.width != 0 ?
source->page.width : source->columns;
geometry.height=source->page.height != 0 ?
source->page.height : source->rows;
GravityAdjustGeometry((*images)->page.width != 0 ?
(*images)->page.width : (*images)->columns,
(*images)->page.height != 0 ? (*images)->page.height :
(*images)->rows,(*images)->gravity,&geometry);
compose=OverCompositeOp;
option=GetImageOption(mogrify_info,"compose");
if (option != (const char *) NULL)
compose=(CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,option);
CompositeLayers(*images,compose,source,geometry.x,geometry.y,
exception);
source=DestroyImageList(source);
break;
}
}
if (layers == (Image *) NULL)
break;
InheritException(exception,&layers->exception);
*images=DestroyImageList(*images);
*images=layers;
break;
}
break;
}
case 'm':
{
if (LocaleCompare("map",option+1) == 0)
{
(void) SyncImagesSettings(mogrify_info,*images);
if (*option == '+')
{
(void) RemapImages(quantize_info,*images,(Image *) NULL);
InheritException(exception,&(*images)->exception);
break;
}
i++;
break;
}
if (LocaleCompare("maximum",option+1) == 0)
{
Image
*maximum_image;
/*
Maximum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
maximum_image=EvaluateImages(*images,MaxEvaluateOperator,exception);
if (maximum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=maximum_image;
break;
}
if (LocaleCompare("minimum",option+1) == 0)
{
Image
*minimum_image;
/*
Minimum image sequence (deprecated).
*/
(void) SyncImagesSettings(mogrify_info,*images);
minimum_image=EvaluateImages(*images,MinEvaluateOperator,exception);
if (minimum_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=minimum_image;
break;
}
if (LocaleCompare("morph",option+1) == 0)
{
Image
*morph_image;
(void) SyncImagesSettings(mogrify_info,*images);
morph_image=MorphImages(*images,StringToUnsignedLong(argv[i+1]),
exception);
if (morph_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=morph_image;
break;
}
if (LocaleCompare("mosaic",option+1) == 0)
{
Image
*mosaic_image;
(void) SyncImagesSettings(mogrify_info,*images);
mosaic_image=MergeImageLayers(*images,MosaicLayer,exception);
if (mosaic_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=mosaic_image;
break;
}
break;
}
case 'p':
{
if (LocaleCompare("poly",option+1) == 0)
{
char
*args,
token[MaxTextExtent];
const char
*p;
double
*arguments;
Image
*polynomial_image;
register ssize_t
x;
size_t
number_arguments;
/*
Polynomial image.
*/
(void) SyncImageSettings(mogrify_info,*images);
args=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
InheritException(exception,&(*images)->exception);
if (args == (char *) NULL)
break;
p=(char *) args;
for (x=0; *p != '\0'; x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
}
number_arguments=(size_t) x;
arguments=(double *) AcquireQuantumMemory(number_arguments,
sizeof(*arguments));
if (arguments == (double *) NULL)
ThrowWandFatalException(ResourceLimitFatalError,
"MemoryAllocationFailed",(*images)->filename);
(void) memset(arguments,0,number_arguments*
sizeof(*arguments));
p=(char *) args;
for (x=0; (x < (ssize_t) number_arguments) && (*p != '\0'); x++)
{
GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
GetNextToken(p,&p,MaxTextExtent,token);
arguments[x]=StringToDouble(token,(char **) NULL);
}
args=DestroyString(args);
polynomial_image=PolynomialImageChannel(*images,channel,
number_arguments >> 1,arguments,exception);
arguments=(double *) RelinquishMagickMemory(arguments);
if (polynomial_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=polynomial_image;
break;
}
if (LocaleCompare("print",option+1) == 0)
{
char
*string;
(void) SyncImagesSettings(mogrify_info,*images);
string=InterpretImageProperties(mogrify_info,*images,argv[i+1]);
if (string == (char *) NULL)
break;
InheritException(exception,&(*images)->exception);
(void) FormatLocaleFile(stdout,"%s",string);
string=DestroyString(string);
break;
}
if (LocaleCompare("process",option+1) == 0)
{
char
**arguments;
int
j,
number_arguments;
(void) SyncImagesSettings(mogrify_info,*images);
arguments=StringToArgv(argv[i+1],&number_arguments);
if (arguments == (char **) NULL)
break;
if ((number_arguments > 1) && (strchr(arguments[1],'=') != (char *) NULL))
{
char
breaker,
quote,
*token;
const char
*arguments;
int
next,
status;
size_t
length;
TokenInfo
*token_info;
/*
Support old style syntax, filter="-option arg".
*/
length=strlen(argv[i+1]);
token=(char *) NULL;
if (~length >= (MaxTextExtent-1))
token=(char *) AcquireQuantumMemory(length+MaxTextExtent,
sizeof(*token));
if (token == (char *) NULL)
break;
next=0;
arguments=argv[i+1];
token_info=AcquireTokenInfo();
status=Tokenizer(token_info,0,token,length,arguments,"","=",
"\"",'\0',&breaker,&next,"e);
token_info=DestroyTokenInfo(token_info);
if (status == 0)
{
const char
*argv;
argv=(&(arguments[next]));
(void) InvokeDynamicImageFilter(token,&(*images),1,&argv,
exception);
}
token=DestroyString(token);
break;
}
if (number_arguments > 1)
{
(void) SubstituteString(&arguments[1],"-","");
(void) InvokeDynamicImageFilter(arguments[1],&(*images),
number_arguments-2,(const char **) arguments+2,exception);
}
for (j=0; j < number_arguments; j++)
arguments[j]=DestroyString(arguments[j]);
arguments=(char **) RelinquishMagickMemory(arguments);
break;
}
break;
}
case 'r':
{
if (LocaleCompare("reverse",option+1) == 0)
{
ReverseImageList(images);
InheritException(exception,&(*images)->exception);
break;
}
break;
}
case 's':
{
if (LocaleCompare("smush",option+1) == 0)
{
Image
*smush_image;
ssize_t
offset;
(void) SyncImagesSettings(mogrify_info,*images);
offset=(ssize_t) StringToLong(argv[i+1]);
smush_image=SmushImages(*images,*option == '-' ? MagickTrue :
MagickFalse,offset,exception);
if (smush_image == (Image *) NULL)
{
status=MagickFalse;
break;
}
*images=DestroyImageList(*images);
*images=smush_image;
break;
}
if (LocaleCompare("swap",option+1) == 0)
{
Image
*p,
*q,
*u,
*v;
ssize_t
swap_index;
index=(-1);
swap_index=(-2);
if (*option != '+')
{
GeometryInfo
geometry_info;
MagickStatusType
flags;
swap_index=(-1);
flags=ParseGeometry(argv[i+1],&geometry_info);
index=(ssize_t) geometry_info.rho;
if ((flags & SigmaValue) != 0)
swap_index=(ssize_t) geometry_info.sigma;
}
p=GetImageFromList(*images,index);
q=GetImageFromList(*images,swap_index);
if ((p == (Image *) NULL) || (q == (Image *) NULL))
{
(void) ThrowMagickException(exception,GetMagickModule(),
OptionError,"NoSuchImage","`%s'",(*images)->filename);
status=MagickFalse;
break;
}
if (p == q)
break;
u=CloneImage(p,0,0,MagickTrue,exception);
if (u == (Image *) NULL)
break;
v=CloneImage(q,0,0,MagickTrue,exception);
if (v == (Image *) NULL)
{
u=DestroyImage(u);
break;
}
ReplaceImageInList(&p,v);
ReplaceImageInList(&q,u);
*images=GetFirstImageInList(q);
break;
}
break;
}
case 'w':
{
if (LocaleCompare("write",option+1) == 0)
{
char
key[MaxTextExtent];
Image
*write_images;
ImageInfo
*write_info;
(void) SyncImagesSettings(mogrify_info,*images);
(void) FormatLocaleString(key,MaxTextExtent,"cache:%s",argv[i+1]);
(void) DeleteImageRegistry(key);
write_images=(*images);
if (*option == '+')
write_images=CloneImageList(*images,exception);
write_info=CloneImageInfo(mogrify_info);
status&=WriteImages(write_info,write_images,argv[i+1],exception);
write_info=DestroyImageInfo(write_info);
if (*option == '+')
write_images=DestroyImageList(write_images);
break;
}
break;
}
default:
break;
}
i+=count;
}
quantize_info=DestroyQuantizeInfo(quantize_info);
mogrify_info=DestroyImageInfo(mogrify_info);
status&=MogrifyImageInfo(image_info,argc,argv,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
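/*
Illustrative sketch (not part of the original source): a minimal caller
for MogrifyImageList(). The filename "sample.gif" and the option vector
are hypothetical; the surrounding MagickCore calls are the usual
acquire/read/destroy pairs. Kept under #if 0 so it is documentation only.
*/
#if 0
static MagickBooleanType CoalesceListExample(ExceptionInfo *exception)
{
const char
*example_argv[] = { "mogrify", "-coalesce" };
Image
*images;
ImageInfo
*image_info;
MagickBooleanType
status;
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,"sample.gif",MaxTextExtent);
images=ReadImage(image_info,exception);
if (images == (Image *) NULL)
return(MagickFalse);
status=MogrifyImageList(image_info,2,example_argv,&images,exception);
images=DestroyImageList(images);
image_info=DestroyImageInfo(image_info);
return(status);
}
#endif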
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M o g r i f y I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MogrifyImages() applies image processing options to a sequence of images as
% prescribed by command line options.
%
% The format of the MogrifyImages method is:
%
% MagickBooleanType MogrifyImages(ImageInfo *image_info,
% const MagickBooleanType post,const int argc,const char **argv,
% Image **images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o post: If true, post process image list operators otherwise pre-process.
%
% o argc: Specifies a pointer to an integer describing the number of
% elements in the argument vector.
%
% o argv: Specifies a pointer to a text array containing the command line
% arguments.
%
% o images: pointer to a pointer to the first image in the image list.
%
% o exception: return any errors or warnings in this structure.
%
*/
WandExport MagickBooleanType MogrifyImages(ImageInfo *image_info,
const MagickBooleanType post,const int argc,const char **argv,
Image **images,ExceptionInfo *exception)
{
#define MogrifyImageTag "Mogrify/Image"
MagickStatusType
status;
MagickBooleanType
proceed;
size_t
n;
register ssize_t
i;
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (images == (Image **) NULL)
return(MogrifyImage(image_info,argc,argv,images,exception));
assert((*images)->previous == (Image *) NULL);
assert((*images)->signature == MagickCoreSignature);
if ((*images)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
(*images)->filename);
if ((argc <= 0) || (*argv == (char *) NULL))
return(MagickTrue);
(void) SetImageInfoProgressMonitor(image_info,(MagickProgressMonitor) NULL,
(void *) NULL);
status=MagickTrue;
#if 0
(void) FormatLocaleFile(stderr, "mogrify start %s %d (%s)\n",argv[0],argc,
post?"post":"pre");
#endif
/*
Pre-process multi-image sequence operators.
*/
if (post == MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
/*
For each image, process the simple single-image operators.
*/
i=0;
n=GetImageListLength(*images);
for (;;)
{
#if 0
(void) FormatLocaleFile(stderr,"mogrify %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
status&=MogrifyImage(image_info,argc,argv,images,exception);
proceed=SetImageProgress(*images,MogrifyImageTag,(MagickOffsetType) i,n);
if (proceed == MagickFalse)
break;
if ((*images)->next == (Image *) NULL)
break;
*images=(*images)->next;
i++;
}
assert(*images != (Image *) NULL);
#if 0
(void) FormatLocaleFile(stderr,"mogrify end %ld of %ld\n",(long)
GetImageIndexInList(*images),(long)GetImageListLength(*images));
#endif
/*
Post-process multi-image sequence operators.
*/
*images=GetFirstImageInList(*images);
if (post != MagickFalse)
status&=MogrifyImageList(image_info,argc,argv,images,exception);
return(status != 0 ? MagickTrue : MagickFalse);
}
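/*
Illustrative sketch (not part of the original source): MogrifyImages() is
normally handed the full option vector once per pass. With post set to
MagickTrue the list operators (e.g. -coalesce) run after the per-image
pass; with MagickFalse they run before it. The options are hypothetical.
*/
#if 0
static MagickBooleanType MogrifyExample(ImageInfo *image_info,
Image **images,ExceptionInfo *exception)
{
const char
*example_argv[] = { "mogrify", "-resize", "50%", "-coalesce" };
return(MogrifyImages(image_info,MagickTrue,4,example_argv,images,
exception));
}
#endif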
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_947_0 |
crossvul-cpp_data_good_3486_18 | /*
* SuperH process tracing
*
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
*
* Audit support by Yuichi Nakamura <ynakam@hitachisoft.jp>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/signal.h>
#include <linux/io.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <linux/hw_breakpoint.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/*
* This routine will get a word off of the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
return (*((int *)stack));
}
/*
* This routine will put a word on the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)task_pt_regs(task);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
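/*
* Illustrative sketch (not in the original file): the two helpers above
* are normally paired with offsetof() to address one saved register, as
* user_enable_single_step() does below for the program counter.
*/
#if 0
static unsigned long example_get_pc(struct task_struct *child)
{
return (unsigned long) get_stack_long(child,
offsetof(struct pt_regs, pc));
}
#endif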
void ptrace_triggered(struct perf_event *bp,
struct perf_sample_data *data, struct pt_regs *regs)
{
struct perf_event_attr attr;
/*
* Disable the breakpoint request here since ptrace has defined a
* one-shot behaviour for breakpoint exceptions.
*/
attr = bp->attr;
attr.disabled = true;
modify_user_hw_breakpoint(bp, &attr);
}
static int set_single_step(struct task_struct *tsk, unsigned long addr)
{
struct thread_struct *thread = &tsk->thread;
struct perf_event *bp;
struct perf_event_attr attr;
bp = thread->ptrace_bps[0];
if (!bp) {
ptrace_breakpoint_init(&attr);
attr.bp_addr = addr;
attr.bp_len = HW_BREAKPOINT_LEN_2;
attr.bp_type = HW_BREAKPOINT_R;
bp = register_user_hw_breakpoint(&attr, ptrace_triggered, tsk);
if (IS_ERR(bp))
return PTR_ERR(bp);
thread->ptrace_bps[0] = bp;
} else {
int err;
attr = bp->attr;
attr.bp_addr = addr;
/* reenable breakpoint */
attr.disabled = false;
err = modify_user_hw_breakpoint(bp, &attr);
if (unlikely(err))
return err;
}
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long pc = get_stack_long(child, offsetof(struct pt_regs, pc));
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (ptrace_get_breakpoints(child) < 0)
return;
set_single_step(child, pc);
ptrace_put_breakpoints(child);
}
void user_disable_single_step(struct task_struct *child)
{
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret)
/* PC, PR, SR, GBR, MACH, MACL, TRA */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
0, 16 * sizeof(unsigned long));
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
offsetof(struct pt_regs, pc),
sizeof(struct pt_regs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
if ((boot_cpu_data.flags & CPU_HAS_FPU))
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->softfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
#ifdef CONFIG_SH_DSP
static int dspregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_dspregs *regs =
(struct pt_dspregs *)&target->thread.dsp_status.dsp_regs;
int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs,
0, sizeof(struct pt_dspregs));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_dspregs), -1);
return ret;
}
static int dspregs_active(struct task_struct *target,
const struct user_regset *regset)
{
struct pt_regs *regs = task_pt_regs(target);
return regs->sr & SR_DSP ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(pr),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(gbr),
REG_OFFSET_NAME(mach),
REG_OFFSET_NAME(macl),
REG_OFFSET_NAME(tra),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
#ifdef CONFIG_SH_DSP
REGSET_DSP,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* R0 --> R15
* PC, PR, SR, GBR, MACH, MACL, TRA
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long),
.align = sizeof(long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
#ifdef CONFIG_SH_DSP
[REGSET_DSP] = {
.n = sizeof(struct pt_dspregs) / sizeof(long),
.size = sizeof(long),
.align = sizeof(long),
.get = dspregs_get,
.set = dspregs_set,
.active = dspregs_active,
},
#endif
};
static const struct user_regset_view user_sh_native_view = {
.name = "sh",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
if (!tsk_used_math(child)) {
if (addr == offsetof(struct user, fpu.fpscr))
tmp = FPSCR_INIT;
else
tmp = 0;
} else {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = ((unsigned long *)child->thread.xstate)
[index >> 2];
}
} else if (addr == offsetof(struct user, u_fpvalid))
tmp = !!tsk_used_math(child);
else if (addr == PT_TEXT_ADDR)
tmp = child->mm->start_code;
else if (addr == PT_DATA_ADDR)
tmp = child->mm->start_data;
else if (addr == PT_TEXT_END_ADDR)
tmp = child->mm->end_code;
else if (addr == PT_TEXT_LEN)
tmp = child->mm->end_code - child->mm->start_code;
else
tmp = 0;
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
ret = -EIO;
if ((addr & 3) || addr < 0 ||
addr > sizeof(struct user) - 3)
break;
if (addr < sizeof(struct pt_regs))
ret = put_stack_long(child, addr, data);
else if (addr >= offsetof(struct user, fpu) &&
addr < offsetof(struct user, u_fpvalid)) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
set_stopped_child_used_math(child);
((unsigned long *)child->thread.xstate)
[index >> 2] = data;
ret = 0;
} else if (addr == offsetof(struct user, u_fpvalid)) {
conditional_stopped_child_used_math(data, child);
ret = 0;
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
#ifdef CONFIG_SH_DSP
case PTRACE_GETDSPREGS:
return copy_regset_to_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
case PTRACE_SETDSPREGS:
return copy_regset_from_user(child, &user_sh_native_view,
REGSET_DSP,
0, sizeof(struct pt_dspregs),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
static inline int audit_arch(void)
{
int arch = EM_SH;
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
{
long ret = 0;
secure_computing(regs->regs[0]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1L;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[0]);
if (unlikely(current->audit_context))
audit_syscall_entry(audit_arch(), regs->regs[3],
regs->regs[4], regs->regs[5],
regs->regs[6], regs->regs[7]);
return ret ?: regs->regs[0];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
if (unlikely(current->audit_context))
audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
regs->regs[0]);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[0]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
| ./CrossVul/dataset_final_sorted/CWE-399/c/good_3486_18 |
crossvul-cpp_data_bad_2058_0 | /*
* FarSync WAN driver for Linux (2.6.x kernel version)
*
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
*
* Copyright (C) 2001-2004 FarSite Communications Ltd.
* www.farsite.co.uk
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Author: R.J.Dunlop <bob.dunlop@farsite.co.uk>
* Maintainer: Kevin Curtis <kevin.curtis@farsite.co.uk>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include "farsync.h"
/*
* Module info
*/
MODULE_AUTHOR("R.J.Dunlop <bob.dunlop@farsite.co.uk>");
MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
MODULE_LICENSE("GPL");
/* Driver configuration and global parameters
* ==========================================
*/
/* Number of ports (per card) and cards supported
*/
#define FST_MAX_PORTS 4
#define FST_MAX_CARDS 32
/* Default parameters for the link
*/
#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
* useful */
#define FST_TXQ_DEPTH 16 /* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
* and maximise throughput
*/
#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
* network layer */
#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
* control from network layer */
#define FST_MAX_MTU 8000 /* Huge but possible */
#define FST_DEF_MTU 1500 /* Common sane value */
#define FST_TX_TIMEOUT (2*HZ)
#ifdef ARPHRD_RAWHDLC
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
#else
#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
#endif
/*
* Modules parameters and associated variables
*/
static int fst_txq_low = FST_LOW_WATER_MARK;
static int fst_txq_high = FST_HIGH_WATER_MARK;
static int fst_max_reads = 7;
static int fst_excluded_cards = 0;
static int fst_excluded_list[FST_MAX_CARDS];
module_param(fst_txq_low, int, 0);
module_param(fst_txq_high, int, 0);
module_param(fst_max_reads, int, 0);
module_param(fst_excluded_cards, int, 0);
module_param_array(fst_excluded_list, int, NULL, 0);
/* Card shared memory layout
* =========================
*/
#pragma pack(1)
/* This information is derived in part from the FarSite FarSync Smc.h
* file. Unfortunately various name clashes and the non-portability of the
* bit field declarations in that file have meant that I have chosen to
* recreate the information here.
*
* The SMC (Shared Memory Configuration) has a version number that is
* incremented every time there is a significant change. This number can
* be used to check that we have not got out of step with the firmware
* contained in the .CDE files.
*/
#define SMC_VERSION 24
#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
* configuration structure */
#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
* buffers */
#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
#define LEN_RX_BUFFER 8192
#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */
#define LEN_SMALL_RX_BUFFER 256
#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */
#define NUM_RX_BUFFER 8
/* Interrupt retry time in milliseconds */
#define INT_RETRY_TIME 2
/* The Am186CH/CC processors support a SmartDMA mode using circular pools
* of buffer descriptors. The structure is almost identical to that used
* in the LANCE Ethernet controllers. Details available as PDF from the
* AMD web site: http://www.amd.com/products/epd/processors/\
* 2.16bitcont/3.am186cxfa/a21914/21914.pdf
*/
struct txdesc { /* Transmit descriptor */
volatile u16 ladr; /* Low order address of packet. This is a
* linear address in the Am186 memory space
*/
volatile u8 hadr; /* High order address. Low 4 bits only, high 4
* bits must be zero
*/
volatile u8 bits; /* Status and config */
volatile u16 bcnt; /* 2s complement of packet size in low 15 bits.
* Transmit terminal count interrupt enable in
* top bit.
*/
u16 unused; /* Not used in Tx */
};
struct rxdesc { /* Receive descriptor */
volatile u16 ladr; /* Low order address of packet */
volatile u8 hadr; /* High order address */
volatile u8 bits; /* Status and config */
volatile u16 bcnt; /* 2s complement of buffer size in low 15 bits.
* Receive terminal count interrupt enable in
* top bit.
*/
volatile u16 mcnt; /* Message byte count (15 bits) */
};
/* Convert a length into the 15 bit 2's complement */
/* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */
/* Since we need to set the high bit to enable the completion interrupt this
* can be made a lot simpler
*/
#define cnv_bcnt(len) (-(len))
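/* Worked example (illustrative, not in the original source): for a 1500
* byte frame the commented-out form gives ((~1500 + 1) & 0x7FFF) = 0x7A24,
* and setting the top bit for the completion interrupt yields 0xFA24.
* Truncated to 16 bits, -(1500) = 0x10000 - 1500 = 0xFA24 directly, which
* is why the single negation above suffices.
*/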
/* Status and config bits for the above */
#define DMA_OWN 0x80 /* SmartDMA owns the descriptor */
#define TX_STP 0x02 /* Tx: start of packet */
#define TX_ENP 0x01 /* Tx: end of packet */
#define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */
#define RX_FRAM 0x20 /* Rx: framing error */
#define RX_OFLO 0x10 /* Rx: overflow error */
#define RX_CRC 0x08 /* Rx: CRC error */
#define RX_HBUF 0x04 /* Rx: buffer error */
#define RX_STP 0x02 /* Rx: start of packet */
#define RX_ENP 0x01 /* Rx: end of packet */
/* Interrupts from the card are caused by various events which are presented
* in a circular buffer as several events may be processed on one physical int
*/
#define MAX_CIRBUFF 32
struct cirbuff {
u8 rdindex; /* read, then increment and wrap */
u8 wrindex; /* write, then increment and wrap */
u8 evntbuff[MAX_CIRBUFF];
};
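/* Illustrative consumer (not in the original source): an event is drained
* by reading evntbuff[rdindex] and then advancing rdindex modulo
* MAX_CIRBUFF, along the lines of:
*
* event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
* rdidx = (rdidx + 1) % MAX_CIRBUFF;
* FST_WRB(card, interruptEvent.rdindex, rdidx);
*/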
/* Interrupt event codes.
* Where appropriate the two low order bits indicate the port number
*/
#define CTLA_CHG 0x18 /* Control signal changed */
#define CTLB_CHG 0x19
#define CTLC_CHG 0x1A
#define CTLD_CHG 0x1B
#define INIT_CPLT 0x20 /* Initialisation complete */
#define INIT_FAIL 0x21 /* Initialisation failed */
#define ABTA_SENT 0x24 /* Abort sent */
#define ABTB_SENT 0x25
#define ABTC_SENT 0x26
#define ABTD_SENT 0x27
#define TXA_UNDF 0x28 /* Transmission underflow */
#define TXB_UNDF 0x29
#define TXC_UNDF 0x2A
#define TXD_UNDF 0x2B
#define F56_INT 0x2C
#define M32_INT 0x2D
#define TE1_ALMA 0x30
/* Port physical configuration. See farsync.h for field values */
struct port_cfg {
u16 lineInterface; /* Physical interface type */
u8 x25op; /* Unused at present */
u8 internalClock; /* 1 => internal clock, 0 => external */
u8 transparentMode; /* 1 => on, 0 => off */
u8 invertClock; /* 0 => normal, 1 => inverted */
u8 padBytes[6]; /* Padding */
u32 lineSpeed; /* Speed in bps */
};
/* TE1 port physical configuration */
struct su_config {
u32 dataRate;
u8 clocking;
u8 framing;
u8 structure;
u8 interface;
u8 coding;
u8 lineBuildOut;
u8 equalizer;
u8 transparentMode;
u8 loopMode;
u8 range;
u8 txBufferMode;
u8 rxBufferMode;
u8 startingSlot;
u8 losThreshold;
u8 enableIdleCode;
u8 idleCode;
u8 spare[44];
};
/* TE1 Status */
struct su_status {
u32 receiveBufferDelay;
u32 framingErrorCount;
u32 codeViolationCount;
u32 crcErrorCount;
u32 lineAttenuation;
u8 portStarted;
u8 lossOfSignal;
u8 receiveRemoteAlarm;
u8 alarmIndicationSignal;
u8 spare[40];
};
/* Finally sling all the above together into the shared memory structure.
* Sorry it's a hodge podge of arrays, structures and unused bits, it's been
* evolving under NT for some time so I guess we're stuck with it.
* The structure starts at offset SMC_BASE.
* See farsync.h for some field values.
*/
struct fst_shared {
/* DMA descriptor rings */
struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];
/* Obsolete small buffers */
u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];
u8 taskStatus; /* 0x00 => initialising, 0x01 => running,
* 0xFF => halted
*/
u8 interruptHandshake; /* Set to 0x01 by adapter to signal interrupt,
* set to 0xEE by host to acknowledge interrupt
*/
u16 smcVersion; /* Must match SMC_VERSION */
u32 smcFirmwareVersion; /* 0xIIVVRRBB where II = product ID, VV = major
* version, RR = revision and BB = build
*/
u16 txa_done; /* Obsolete completion flags */
u16 rxa_done;
u16 txb_done;
u16 rxb_done;
u16 txc_done;
u16 rxc_done;
u16 txd_done;
u16 rxd_done;
u16 mailbox[4]; /* Diagnostics mailbox. Not used */
struct cirbuff interruptEvent; /* interrupt causes */
u32 v24IpSts[FST_MAX_PORTS]; /* V.24 control input status */
u32 v24OpSts[FST_MAX_PORTS]; /* V.24 control output status */
struct port_cfg portConfig[FST_MAX_PORTS];
u16 clockStatus[FST_MAX_PORTS]; /* lsb: 0=> present, 1=> absent */
u16 cableStatus; /* lsb: 0=> present, 1=> absent */
u16 txDescrIndex[FST_MAX_PORTS]; /* transmit descriptor ring index */
u16 rxDescrIndex[FST_MAX_PORTS]; /* receive descriptor ring index */
u16 portMailbox[FST_MAX_PORTS][2]; /* command, modifier */
u16 cardMailbox[4]; /* Not used */
/* Number of times the card thinks the host has
* missed an interrupt by not acknowledging
* within 2mS (I guess NT has problems)
*/
u32 interruptRetryCount;
/* Driver private data used as an ID. We'll not
* use this as I'd rather keep such things
* in main memory rather than on the PCI bus
*/
u32 portHandle[FST_MAX_PORTS];
/* Count of Tx underflows for stats */
u32 transmitBufferUnderflow[FST_MAX_PORTS];
/* Debounced V.24 control input status */
u32 v24DebouncedSts[FST_MAX_PORTS];
/* Adapter debounce timers. Don't touch */
u32 ctsTimer[FST_MAX_PORTS];
u32 ctsTimerRun[FST_MAX_PORTS];
u32 dcdTimer[FST_MAX_PORTS];
u32 dcdTimerRun[FST_MAX_PORTS];
u32 numberOfPorts; /* Number of ports detected at startup */
u16 _reserved[64];
u16 cardMode; /* Bit-mask to enable features:
* Bit 0: 1 enables LED identify mode
*/
u16 portScheduleOffset;
struct su_config suConfig; /* TE1 Bits */
struct su_status suStatus;
u32 endOfSmcSignature; /* endOfSmcSignature MUST be the last member of
* the structure and marks the end of shared
* memory. Adapter code initializes it as
* END_SIG.
*/
};
/* endOfSmcSignature value */
#define END_SIG 0x12345678
/* Mailbox values. (portMailbox) */
#define NOP 0 /* No operation */
#define ACK 1 /* Positive acknowledgement to PC driver */
#define NAK 2 /* Negative acknowledgement to PC driver */
#define STARTPORT 3 /* Start an HDLC port */
#define STOPPORT 4 /* Stop an HDLC port */
#define ABORTTX 5 /* Abort the transmitter for a port */
#define SETV24O 6 /* Set V24 outputs */
/* PLX Chip Register Offsets */
#define CNTRL_9052 0x50 /* Control Register */
#define CNTRL_9054 0x6c /* Control Register */
#define INTCSR_9052 0x4c /* Interrupt control/status register */
#define INTCSR_9054 0x68 /* Interrupt control/status register */
/* 9054 DMA Registers */
/*
* Note that we will be using DMA Channel 0 for copying rx data
* and Channel 1 for copying tx data
*/
#define DMAMODE0 0x80
#define DMAPADR0 0x84
#define DMALADR0 0x88
#define DMASIZ0 0x8c
#define DMADPR0 0x90
#define DMAMODE1 0x94
#define DMAPADR1 0x98
#define DMALADR1 0x9c
#define DMASIZ1 0xa0
#define DMADPR1 0xa4
#define DMACSR0 0xa8
#define DMACSR1 0xa9
#define DMAARB 0xac
#define DMATHR 0xb0
#define DMADAC0 0xb4
#define DMADAC1 0xb8
#define DMAMARBR 0xac
#define FST_MIN_DMA_LEN 64
#define FST_RX_DMA_INT 0x01
#define FST_TX_DMA_INT 0x02
#define FST_CARD_INT 0x04
/* Larger buffers are positioned in memory at offset BFM_BASE */
struct buf_window {
u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
};
/* Calculate offset of a buffer object within the shared memory window */
#define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X))
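/* For example (illustrative, not in the original source):
* BUF_OFFSET(txBuffer[1][0]) evaluates to BFM_BASE plus the offset of
* port 1's first transmit buffer within struct buf_window, giving the
* window-relative address used with the card memory accessors.
*/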
#pragma pack()
/* Device driver private information
* =================================
*/
/* Per port (line or channel) information
*/
struct fst_port_info {
struct net_device *dev; /* Device struct - must be first */
struct fst_card_info *card; /* Card we're associated with */
int index; /* Port index on the card */
int hwif; /* Line hardware (lineInterface copy) */
int run; /* Port is running */
int mode; /* Normal or FarSync raw */
int rxpos; /* Next Rx buffer to use */
int txpos; /* Next Tx buffer to use */
int txipos; /* Next Tx buffer to check for free */
int start; /* Indication of start/stop to network */
/*
* A sixteen entry transmit queue
*/
int txqs; /* index to get next buffer to tx */
int txqe; /* index to queue next packet */
struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */
int rxqdepth;
};
/* Per card information
*/
struct fst_card_info {
char __iomem *mem; /* Card memory mapped to kernel space */
char __iomem *ctlmem; /* Control memory for PCI cards */
unsigned int phys_mem; /* Physical memory window address */
unsigned int phys_ctlmem; /* Physical control memory address */
unsigned int irq; /* Interrupt request line number */
unsigned int nports; /* Number of serial ports */
unsigned int type; /* Type index of card */
unsigned int state; /* State of card */
spinlock_t card_lock; /* Lock for SMP access */
unsigned short pci_conf; /* PCI card config in I/O space */
/* Per port info */
struct fst_port_info ports[FST_MAX_PORTS];
struct pci_dev *device; /* Information about the pci device */
int card_no; /* Inst of the card on the system */
int family; /* TxP or TxU */
int dmarx_in_progress;
int dmatx_in_progress;
unsigned long int_count;
unsigned long int_time_ave;
void *rx_dma_handle_host;
dma_addr_t rx_dma_handle_card;
void *tx_dma_handle_host;
dma_addr_t tx_dma_handle_card;
struct sk_buff *dma_skb_rx;
struct fst_port_info *dma_port_rx;
struct fst_port_info *dma_port_tx;
int dma_len_rx;
int dma_len_tx;
int dma_txpos;
int dma_rxpos;
};
/* Convert an HDLC device pointer into a port info pointer and similar */
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
#define port_to_dev(P) ((P)->dev)
/*
* Shared memory window access macros
*
* We have a nice memory based structure above, which could be directly
* mapped on i386 but might not work on other architectures unless we use
* the readb,w,l and writeb,w,l macros. Unfortunately these macros take
* physical offsets so we have to convert. The only saving grace is that
* this should all collapse back to a simple indirection eventually.
*/
#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
#define FST_RDB(C,E) readb ((C)->mem + WIN_OFFSET(E))
#define FST_RDW(C,E) readw ((C)->mem + WIN_OFFSET(E))
#define FST_RDL(C,E) readl ((C)->mem + WIN_OFFSET(E))
#define FST_WRB(C,E,B) writeb ((B), (C)->mem + WIN_OFFSET(E))
#define FST_WRW(C,E,W) writew ((W), (C)->mem + WIN_OFFSET(E))
#define FST_WRL(C,E,L) writel ((L), (C)->mem + WIN_OFFSET(E))
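/*
* Illustrative sketch (not in the original source): using the accessors
* above to test whether the firmware reports itself running
* (taskStatus == 0x01) and to acknowledge an interrupt by writing 0xEE
* to interruptHandshake, as described in struct fst_shared.
*/
#if 0
static int example_card_running(struct fst_card_info *card)
{
if (FST_RDB(card, taskStatus) != 0x01)
return 0;
FST_WRB(card, interruptHandshake, 0xEE);
return 1;
}
#endif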
/*
* Debug support
*/
#if FST_DEBUG
static int fst_debug_mask = { FST_DEBUG };
/* Most common debug activity is to print something if the corresponding bit
* is set in the debug mask. Note: this uses a non-ANSI extension in GCC to
* support variable numbers of macro parameters. The inverted if prevents us
* eating someone else's else clause.
*/
#define dbg(F, fmt, args...) \
do { \
if (fst_debug_mask & (F)) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0)
#else
#define dbg(F, fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0)
#endif
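/* Illustrative use (not in the original source): the call below prints
* only when the DBG_ASS bit is set in fst_debug_mask and compiles away
* entirely when FST_DEBUG is unset:
*
* dbg(DBG_ASS, "card %d did not start\n", card->card_no);
*/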
/*
* PCI ID lookup table
*/
static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{0,} /* End */
};
MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
/*
* Device Driver Work Queues
*
* So that we don't spend too much time processing events in the
* Interrupt Service routine, we will declare a work queue per Card
* and make the ISR schedule a task in the queue for later execution.
* In the 2.4 Kernel we used to use the immediate queue for BH's
* Now that they are gone, tasklets seem to be much better than work
* queues.
*/
static void do_bottom_half_tx(struct fst_card_info *card);
static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(unsigned long work_q);
static void fst_process_int_work_q(unsigned long work_q);
static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q, 0);
static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q, 0);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
static spinlock_t fst_work_q_lock;
static u64 fst_work_txq;
static u64 fst_work_intq;
static void
fst_q_work_item(u64 *queue, int card_index)
{
unsigned long flags;
u64 mask;
/*
* Grab the queue exclusively
*/
spin_lock_irqsave(&fst_work_q_lock, flags);
/*
* Making an entry in the queue is simply a matter of setting
* a bit for the card indicating that there is work to do in the
* bottom half for the card. Note the limitation of 64 cards.
* That ought to be enough
*/
mask = (u64)1 << card_index;
*queue |= mask;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
}
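/*
* Illustrative sketch (not in the original source): the usual producer
* side as an interrupt handler would use it, marking the card's bit in
* the shared mask and then scheduling the matching tasklet to drain it.
*/
#if 0
static void example_queue_tx_work(struct fst_card_info *card)
{
fst_q_work_item(&fst_work_txq, card->card_no);
tasklet_schedule(&fst_tx_task);
}
#endif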
static void
fst_process_tx_work_q(unsigned long /*void **/work_q)
{
unsigned long flags;
u64 work_txq;
int i;
/*
* Grab the queue exclusively
*/
dbg(DBG_TX, "fst_process_tx_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
work_txq = fst_work_txq;
fst_work_txq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
/*
* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_txq & 0x01) {
if (fst_card_array[i] != NULL) {
dbg(DBG_TX, "Calling tx bh for card %d\n", i);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_txq = work_txq >> 1;
}
}
static void
fst_process_int_work_q(unsigned long /*void **/work_q)
{
unsigned long flags;
u64 work_intq;
int i;
/*
* Grab the queue exclusively
*/
dbg(DBG_INTR, "fst_process_int_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
work_intq = fst_work_intq;
fst_work_intq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
/*
* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_intq & 0x01) {
if (fst_card_array[i] != NULL) {
dbg(DBG_INTR,
"Calling rx & tx bh for card %d\n", i);
do_bottom_half_rx(fst_card_array[i]);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_intq = work_intq >> 1;
}
}
/* Card control functions
* ======================
*/
/* Place the processor in reset state
*
* Used to be a simple write to card control space but a glitch in the latest
* AMD Am186CH processor means that we now have to do it by asserting and de-
 * asserting the PLX chip's PCI Adapter Software Reset (bit 30 of the CNTRL
 * register at offset 9052_CNTRL). Note the updates for the TXU.
*/
static inline void
fst_cpureset(struct fst_card_info *card)
{
unsigned char interrupt_line_register;
unsigned long j = jiffies + 1;
unsigned int regval;
if (card->family == FST_FAMILY_TXU) {
if (pci_read_config_byte
(card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
dbg(DBG_ASS,
"Error in reading interrupt line register\n");
}
/*
* Assert PLX software reset and Am186 hardware reset
	 * and then deassert the PLX software reset, leaving the 186 in reset
*/
outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
/*
* We are delaying here to allow the 9054 to reset itself
*/
j = jiffies + 1;
		while (time_before(jiffies, j))
/* Do nothing */ ;
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
/*
* We are delaying here to allow the 9054 to reload its eeprom
*/
j = jiffies + 1;
		while (time_before(jiffies, j))
/* Do nothing */ ;
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
if (pci_write_config_byte
(card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
dbg(DBG_ASS,
"Error in writing interrupt line register\n");
}
} else {
regval = inl(card->pci_conf + CNTRL_9052);
outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
}
}
/* Release the processor from reset
*/
static inline void
fst_cpurelease(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
/*
* Force posted writes to complete
*/
(void) readb(card->mem);
/*
* Release LRESET DO = 1
* Then release Local Hold, DO = 1
*/
outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
} else {
(void) readb(card->ctlmem);
}
}
/* Clear the card's interrupt flag
*/
static inline void
fst_clear_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
(void) readb(card->ctlmem);
} else {
/* Poke the appropriate PLX chip register (same as enabling interrupts)
*/
outw(0x0543, card->pci_conf + INTCSR_9052);
}
}
/* Enable card interrupts
*/
static inline void
fst_enable_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
} else {
outw(0x0543, card->pci_conf + INTCSR_9052);
}
}
/* Disable card interrupts
*/
static inline void
fst_disable_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
outl(0x00000000, card->pci_conf + INTCSR_9054);
} else {
outw(0x0000, card->pci_conf + INTCSR_9052);
}
}
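/* Summary of the interrupt plumbing above (illustrative; the bit patterns
 * come from the PLX data books): on TXU family cards the 9054 INTCSR is
 * programmed directly, 0x0f0c0900 to enable and 0 to mask, while on the
 * older TXP family the 9052 INTCSR value 0x0543 both enables and re-arms
 * the interrupt, which is why fst_clear_intr() writes the same value.
 */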
/* Process the result of trying to pass a received frame up the stack
*/
static void
fst_process_rx_status(int rx_status, char *name)
{
switch (rx_status) {
case NET_RX_SUCCESS:
{
/*
* Nothing to do here
*/
break;
}
case NET_RX_DROP:
{
dbg(DBG_ASS, "%s: Received packet dropped\n", name);
break;
}
}
}
/* Initialise DMA for the PLX 9054
*/
static inline void
fst_init_dma(struct fst_card_info *card)
{
/*
* This is only required for the PLX 9054
*/
if (card->family == FST_FAMILY_TXU) {
pci_set_master(card->device);
outl(0x00020441, card->pci_conf + DMAMODE0);
outl(0x00020441, card->pci_conf + DMAMODE1);
outl(0x0, card->pci_conf + DMATHR);
}
}
/* Tx dma complete interrupt
*/
static void
fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
int len, int txpos)
{
struct net_device *dev = port_to_dev(port);
/*
* Everything is now set, just tell the card to go
*/
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
dev->trans_start = jiffies;
}
/*
* Mark it for our own raw sockets interface
*/
static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
skb_reset_mac_header(skb);
skb->pkt_type = PACKET_HOST;
return htons(ETH_P_CUST);
}
/* Rx dma complete interrupt
*/
static void
fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
int len, struct sk_buff *skb, int rxp)
{
struct net_device *dev = port_to_dev(port);
int pi;
int rx_status;
dbg(DBG_TX, "fst_rx_dma_complete\n");
pi = port->index;
memcpy(skb_put(skb, len), card->rx_dma_handle_host, len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing the frame up the stack\n");
if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev);
else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
}
/*
* Receive a frame through the DMA
*/
static inline void
fst_rx_dma(struct fst_card_info *card, dma_addr_t skb,
dma_addr_t mem, int len)
{
/*
* This routine will setup the DMA and start it
*/
dbg(DBG_RX, "In fst_rx_dma %lx %lx %d\n",
(unsigned long) skb, (unsigned long) mem, len);
if (card->dmarx_in_progress) {
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
}
outl(skb, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
	outl(0x0000000c, card->pci_conf + DMADPR0);	/* In this direction */
/*
* We use the dmarx_in_progress flag to flag the channel as busy
*/
card->dmarx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
}
/*
* Send a frame through the DMA
*/
static inline void
fst_tx_dma(struct fst_card_info *card, unsigned char *skb,
unsigned char *mem, int len)
{
/*
* This routine will setup the DMA and start it.
*/
dbg(DBG_TX, "In fst_tx_dma %p %p %d\n", skb, mem, len);
if (card->dmatx_in_progress) {
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
}
outl((unsigned long) skb, card->pci_conf + DMAPADR1); /* Copy from here */
outl((unsigned long) mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
	outl(0x00000004, card->pci_conf + DMADPR1);	/* In this direction */
/*
* We use the dmatx_in_progress to flag the channel as busy
*/
card->dmatx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
}
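/* A note on the DMADPRx values used above, as we read the PLX 9054 data
 * book: bit 2 requests an interrupt on terminal count and bit 3 selects
 * the transfer direction, so 0xc means "card to host, interrupt when
 * done" for receive and 0x4 means "host to card, interrupt when done"
 * for transmit. Treat the bit breakdown as informational.
 */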
/* Issue a Mailbox command for a port.
* Note we issue them on a fire and forget basis, not expecting to see an
* error and not waiting for completion.
*/
static void
fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
{
struct fst_card_info *card;
unsigned short mbval;
unsigned long flags;
int safety;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
mbval = FST_RDW(card, portMailbox[port->index][0]);
safety = 0;
/* Wait for any previous command to complete */
while (mbval > NAK) {
spin_unlock_irqrestore(&card->card_lock, flags);
schedule_timeout_uninterruptible(1);
spin_lock_irqsave(&card->card_lock, flags);
if (++safety > 2000) {
pr_err("Mailbox safety timeout\n");
break;
}
mbval = FST_RDW(card, portMailbox[port->index][0]);
}
if (safety > 0) {
dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
}
if (mbval == NAK) {
dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
}
FST_WRW(card, portMailbox[port->index][0], cmd);
if (cmd == ABORTTX || cmd == STARTPORT) {
port->txpos = 0;
port->txipos = 0;
port->start = 0;
}
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* Port output signals control
*/
static inline void
fst_op_raise(struct fst_port_info *port, unsigned int outputs)
{
outputs |= FST_RDL(port->card, v24OpSts[port->index]);
FST_WRL(port->card, v24OpSts[port->index], outputs);
if (port->run)
fst_issue_cmd(port, SETV24O);
}
static inline void
fst_op_lower(struct fst_port_info *port, unsigned int outputs)
{
outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
FST_WRL(port->card, v24OpSts[port->index], outputs);
if (port->run)
fst_issue_cmd(port, SETV24O);
}
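/* Illustrative use, mirrored in fst_openport()/fst_closeport() below:
 *	fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
 * asserts RTS and DTR in shared memory and, if the port is running,
 * issues SETV24O so the firmware refreshes the physical outputs.
 */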
/*
* Setup port Rx buffers
*/
static void
fst_rx_config(struct fst_port_info *port)
{
int i;
int pi;
unsigned int offset;
unsigned long flags;
struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
for (i = 0; i < NUM_RX_BUFFER; i++) {
offset = BUF_OFFSET(rxBuffer[pi][i][0]);
FST_WRW(card, rxDescrRing[pi][i].ladr, (u16) offset);
FST_WRB(card, rxDescrRing[pi][i].hadr, (u8) (offset >> 16));
FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
}
port->rxpos = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
}
/*
* Setup port Tx buffers
*/
static void
fst_tx_config(struct fst_port_info *port)
{
int i;
int pi;
unsigned int offset;
unsigned long flags;
struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
for (i = 0; i < NUM_TX_BUFFER; i++) {
offset = BUF_OFFSET(txBuffer[pi][i][0]);
FST_WRW(card, txDescrRing[pi][i].ladr, (u16) offset);
FST_WRB(card, txDescrRing[pi][i].hadr, (u8) (offset >> 16));
FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
FST_WRB(card, txDescrRing[pi][i].bits, 0);
}
port->txpos = 0;
port->txipos = 0;
port->start = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* TE1 Alarm change interrupt event
*/
static void
fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
{
u8 los;
u8 rra;
u8 ais;
los = FST_RDB(card, suStatus.lossOfSignal);
rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
ais = FST_RDB(card, suStatus.alarmIndicationSignal);
if (los) {
/*
* Lost the link
*/
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier off\n");
netif_carrier_off(port_to_dev(port));
}
} else {
/*
* Link available
*/
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier on\n");
netif_carrier_on(port_to_dev(port));
}
}
if (los)
dbg(DBG_INTR, "Assert LOS Alarm\n");
else
dbg(DBG_INTR, "De-assert LOS Alarm\n");
if (rra)
dbg(DBG_INTR, "Assert RRA Alarm\n");
else
dbg(DBG_INTR, "De-assert RRA Alarm\n");
if (ais)
dbg(DBG_INTR, "Assert AIS Alarm\n");
else
dbg(DBG_INTR, "De-assert AIS Alarm\n");
}
/* Control signal change interrupt event
*/
static void
fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
{
int signals;
signals = FST_RDL(card, v24DebouncedSts[port->index]);
if (signals & (((port->hwif == X21) || (port->hwif == X21D))
? IPSTS_INDICATE : IPSTS_DCD)) {
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD active\n");
netif_carrier_on(port_to_dev(port));
}
} else {
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD lost\n");
netif_carrier_off(port_to_dev(port));
}
}
}
/* Log Rx Errors
*/
static void
fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
unsigned char dmabits, int rxp, unsigned short len)
{
struct net_device *dev = port_to_dev(port);
/*
* Increment the appropriate error counter
*/
dev->stats.rx_errors++;
if (dmabits & RX_OFLO) {
dev->stats.rx_fifo_errors++;
dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
card->card_no, port->index, rxp);
}
if (dmabits & RX_CRC) {
dev->stats.rx_crc_errors++;
dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits & RX_FRAM) {
dev->stats.rx_frame_errors++;
dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits == (RX_STP | RX_ENP)) {
dev->stats.rx_length_errors++;
dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
len, card->card_no, port->index);
}
}
/* Rx Error Recovery
*/
static void
fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
unsigned char dmabits, int rxp, unsigned short len)
{
int i;
int pi;
pi = port->index;
/*
* Discard buffer descriptors until we see the start of the
* next frame. Note that for long frames this could be in
* a subsequent interrupt.
*/
i = 0;
while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp+1) % NUM_RX_BUFFER;
if (++i > NUM_RX_BUFFER) {
dbg(DBG_ASS, "intr_rx: Discarding more bufs"
" than we have\n");
break;
}
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
}
dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
/* Discard the terminal buffer */
if (!(dmabits & DMA_OWN)) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp+1) % NUM_RX_BUFFER;
}
port->rxpos = rxp;
}
/* Rx complete interrupt
*/
static void
fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
{
unsigned char dmabits;
int pi;
int rxp;
int rx_status;
unsigned short len;
struct sk_buff *skb;
struct net_device *dev = port_to_dev(port);
/* Check we have a buffer to process */
pi = port->index;
rxp = port->rxpos;
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
if (dmabits & DMA_OWN) {
dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
pi, rxp);
return;
}
if (card->dmarx_in_progress) {
return;
}
/* Get buffer length */
len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
/* Discard the CRC */
len -= 2;
if (len == 0) {
/*
* This seems to happen on the TE1 interface sometimes
* so throw the frame away and log the event.
*/
pr_err("Frame received with 0 length. Card %d Port %d\n",
card->card_no, port->index);
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp+1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
/* Check buffer length and for other errors. We insist on one packet
* in one buffer. This simplifies things greatly and since we've
* allocated 8K it shouldn't be a real world limitation
*/
dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len);
if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
fst_log_rx_error(card, port, dmabits, rxp, len);
fst_recover_rx_error(card, port, dmabits, rxp, len);
return;
}
/* Allocate SKB */
if ((skb = dev_alloc_skb(len)) == NULL) {
dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
dev->stats.rx_dropped++;
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp+1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
/*
* We know the length we need to receive, len.
* It's not worth using the DMA for reads of less than
* FST_MIN_DMA_LEN
*/
if ((len < FST_MIN_DMA_LEN) || (card->family == FST_FAMILY_TXP)) {
memcpy_fromio(skb_put(skb, len),
card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing frame up the stack\n");
if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev);
else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
} else {
card->dma_skb_rx = skb;
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
fst_rx_dma(card, card->rx_dma_handle_card,
BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
}
if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
}
rxp = (rxp+1) % NUM_RX_BUFFER;
port->rxpos = rxp;
}
/*
 * The bottom halves of the ISR
*
*/
static void
do_bottom_half_tx(struct fst_card_info *card)
{
struct fst_port_info *port;
int pi;
int txq_length;
struct sk_buff *skb;
unsigned long flags;
struct net_device *dev;
/*
* Find a free buffer for the transmit
* Step through each port on this card
*/
dbg(DBG_TX, "do_bottom_half_tx\n");
for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
if (!port->run)
continue;
dev = port_to_dev(port);
while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
DMA_OWN) &&
!(card->dmatx_in_progress)) {
/*
* There doesn't seem to be a txdone event per-se
* We seem to have to deduce it, by checking the DMA_OWN
* bit on the next buffer we think we can use
*/
spin_lock_irqsave(&card->card_lock, flags);
if ((txq_length = port->txqe - port->txqs) < 0) {
/*
* This is the case where one has wrapped and the
* maths gives us a negative number
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > 0) {
/*
* There is something to send
*/
spin_lock_irqsave(&card->card_lock, flags);
skb = port->txq[port->txqs];
port->txqs++;
if (port->txqs == FST_TXQ_DEPTH) {
port->txqs = 0;
}
spin_unlock_irqrestore(&card->card_lock, flags);
/*
* copy the data and set the required indicators on the
* card.
*/
FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
cnv_bcnt(skb->len));
if ((skb->len < FST_MIN_DMA_LEN) ||
(card->family == FST_FAMILY_TXP)) {
/* Enqueue the packet with normal io */
memcpy_toio(card->mem +
BUF_OFFSET(txBuffer[pi]
[port->
txpos][0]),
skb->data, skb->len);
FST_WRB(card,
txDescrRing[pi][port->txpos].
bits,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
} else {
/* Or do it through dma */
memcpy(card->tx_dma_handle_host,
skb->data, skb->len);
card->dma_port_tx = port;
card->dma_len_tx = skb->len;
card->dma_txpos = port->txpos;
fst_tx_dma(card,
(char *) card->
tx_dma_handle_card,
(char *)
BUF_OFFSET(txBuffer[pi]
[port->txpos][0]),
skb->len);
}
if (++port->txpos >= NUM_TX_BUFFER)
port->txpos = 0;
/*
* If we have flow control on, can we now release it?
*/
if (port->start) {
if (txq_length < fst_txq_low) {
netif_wake_queue(port_to_dev
(port));
port->start = 0;
}
}
dev_kfree_skb(skb);
} else {
/*
* Nothing to send so break out of the while loop
*/
break;
}
}
}
}
static void
do_bottom_half_rx(struct fst_card_info *card)
{
struct fst_port_info *port;
int pi;
int rx_count = 0;
/* Check for rx completions on all ports on this card */
dbg(DBG_RX, "do_bottom_half_rx\n");
for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
if (!port->run)
continue;
while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
& DMA_OWN) && !(card->dmarx_in_progress)) {
if (rx_count > fst_max_reads) {
/*
* Don't spend forever in receive processing
* Schedule another event
*/
fst_q_work_item(&fst_work_intq, card->card_no);
tasklet_schedule(&fst_int_task);
break; /* Leave the loop */
}
fst_intr_rx(card, port);
rx_count++;
}
}
}
/*
* The interrupt service routine
* Dev_id is our fst_card_info pointer
*/
static irqreturn_t
fst_intr(int dummy, void *dev_id)
{
struct fst_card_info *card = dev_id;
struct fst_port_info *port;
int rdidx; /* Event buffer indices */
int wridx;
int event; /* Actual event for processing */
unsigned int dma_intcsr = 0;
unsigned int do_card_interrupt;
unsigned int int_retry_count;
/*
* Check to see if the interrupt was for this card
* return if not
* Note that the call to clear the interrupt is important
*/
dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
if (card->state != FST_RUNNING) {
pr_err("Interrupt received for card %d in a non running state (%d)\n",
card->card_no, card->state);
/*
* It is possible to really be running, i.e. we have re-loaded
* a running card
* Clear and reprime the interrupt source
*/
fst_clear_intr(card);
return IRQ_HANDLED;
}
/* Clear and reprime the interrupt source */
fst_clear_intr(card);
/*
* Is the interrupt for this card (handshake == 1)
*/
do_card_interrupt = 0;
if (FST_RDB(card, interruptHandshake) == 1) {
do_card_interrupt += FST_CARD_INT;
/* Set the software acknowledge */
FST_WRB(card, interruptHandshake, 0xEE);
}
if (card->family == FST_FAMILY_TXU) {
/*
* Is it a DMA Interrupt
*/
dma_intcsr = inl(card->pci_conf + INTCSR_9054);
if (dma_intcsr & 0x00200000) {
/*
* DMA Channel 0 (Rx transfer complete)
*/
dbg(DBG_RX, "DMA Rx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR0);
fst_rx_dma_complete(card, card->dma_port_rx,
card->dma_len_rx, card->dma_skb_rx,
card->dma_rxpos);
card->dmarx_in_progress = 0;
do_card_interrupt += FST_RX_DMA_INT;
}
if (dma_intcsr & 0x00400000) {
/*
* DMA Channel 1 (Tx transfer complete)
*/
dbg(DBG_TX, "DMA Tx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR1);
fst_tx_dma_complete(card, card->dma_port_tx,
card->dma_len_tx, card->dma_txpos);
card->dmatx_in_progress = 0;
do_card_interrupt += FST_TX_DMA_INT;
}
}
/*
* Have we been missing Interrupts
*/
int_retry_count = FST_RDL(card, interruptRetryCount);
if (int_retry_count) {
dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
card->card_no, int_retry_count);
FST_WRL(card, interruptRetryCount, 0);
}
if (!do_card_interrupt) {
return IRQ_HANDLED;
}
	/* Schedule the bottom half of the ISR */
fst_q_work_item(&fst_work_intq, card->card_no);
tasklet_schedule(&fst_int_task);
/* Drain the event queue */
rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
while (rdidx != wridx) {
event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
port = &card->ports[event & 0x03];
dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);
switch (event) {
case TE1_ALMA:
dbg(DBG_INTR, "TE1 Alarm intr\n");
if (port->run)
fst_intr_te1_alarm(card, port);
break;
case CTLA_CHG:
case CTLB_CHG:
case CTLC_CHG:
case CTLD_CHG:
if (port->run)
fst_intr_ctlchg(card, port);
break;
case ABTA_SENT:
case ABTB_SENT:
case ABTC_SENT:
case ABTD_SENT:
dbg(DBG_TX, "Abort complete port %d\n", port->index);
break;
case TXA_UNDF:
case TXB_UNDF:
case TXC_UNDF:
case TXD_UNDF:
/* Difficult to see how we'd get this given that we
* always load up the entire packet for DMA.
*/
dbg(DBG_TX, "Tx underflow port %d\n", port->index);
port_to_dev(port)->stats.tx_errors++;
port_to_dev(port)->stats.tx_fifo_errors++;
dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
card->card_no, port->index);
break;
case INIT_CPLT:
dbg(DBG_INIT, "Card init OK intr\n");
break;
case INIT_FAIL:
dbg(DBG_INIT, "Card init FAILED intr\n");
card->state = FST_IFAILED;
break;
default:
pr_err("intr: unknown card event %d. ignored\n", event);
break;
}
/* Bump and wrap the index */
if (++rdidx >= MAX_CIRBUFF)
rdidx = 0;
}
FST_WRB(card, interruptEvent.rdindex, rdidx);
return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle
* and that some basic parameters are correct
*/
static void
check_started_ok(struct fst_card_info *card)
{
int i;
/* Check structure version and end marker */
if (FST_RDW(card, smcVersion) != SMC_VERSION) {
pr_err("Bad shared memory version %d expected %d\n",
FST_RDW(card, smcVersion), SMC_VERSION);
card->state = FST_BADVERSION;
return;
}
if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
pr_err("Missing shared memory signature\n");
card->state = FST_BADVERSION;
return;
}
/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
if ((i = FST_RDB(card, taskStatus)) == 0x01) {
card->state = FST_RUNNING;
} else if (i == 0xFF) {
pr_err("Firmware initialisation failed. Card halted\n");
card->state = FST_HALTED;
return;
} else if (i != 0x00) {
pr_err("Unknown firmware status 0x%x\n", i);
card->state = FST_HALTED;
return;
}
/* Finally check the number of ports reported by firmware against the
* number we assumed at card detection. Should never happen with
* existing firmware etc so we just report it for the moment.
*/
if (FST_RDL(card, numberOfPorts) != card->nports) {
pr_warn("Port count mismatch on card %d. Firmware thinks %d we say %d\n",
card->card_no,
FST_RDL(card, numberOfPorts), card->nports);
}
}
static int
set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
struct fstioc_info *info)
{
int err;
unsigned char my_framing;
/* Set things according to the user set valid flags
* Several of the old options have been invalidated/replaced by the
* generic hdlc package.
*/
err = 0;
if (info->valid & FSTVAL_PROTO) {
if (info->proto == FST_RAW)
port->mode = FST_RAW;
else
port->mode = FST_GEN_HDLC;
}
if (info->valid & FSTVAL_CABLE)
err = -EINVAL;
if (info->valid & FSTVAL_SPEED)
err = -EINVAL;
if (info->valid & FSTVAL_PHASE)
FST_WRB(card, portConfig[port->index].invertClock,
info->invertClock);
if (info->valid & FSTVAL_MODE)
FST_WRW(card, cardMode, info->cardMode);
if (info->valid & FSTVAL_TE1) {
FST_WRL(card, suConfig.dataRate, info->lineSpeed);
FST_WRB(card, suConfig.clocking, info->clockSource);
my_framing = FRAMING_E1;
if (info->framing == E1)
my_framing = FRAMING_E1;
if (info->framing == T1)
my_framing = FRAMING_T1;
if (info->framing == J1)
my_framing = FRAMING_J1;
FST_WRB(card, suConfig.framing, my_framing);
FST_WRB(card, suConfig.structure, info->structure);
FST_WRB(card, suConfig.interface, info->interface);
FST_WRB(card, suConfig.coding, info->coding);
FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
FST_WRB(card, suConfig.equalizer, info->equalizer);
FST_WRB(card, suConfig.transparentMode, info->transparentMode);
FST_WRB(card, suConfig.loopMode, info->loopMode);
FST_WRB(card, suConfig.range, info->range);
FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
FST_WRB(card, suConfig.startingSlot, info->startingSlot);
FST_WRB(card, suConfig.losThreshold, info->losThreshold);
if (info->idleCode)
FST_WRB(card, suConfig.enableIdleCode, 1);
else
FST_WRB(card, suConfig.enableIdleCode, 0);
FST_WRB(card, suConfig.idleCode, info->idleCode);
#if FST_DEBUG
if (info->valid & FSTVAL_TE1) {
printk("Setting TE1 data\n");
printk("Line Speed = %d\n", info->lineSpeed);
printk("Start slot = %d\n", info->startingSlot);
printk("Clock source = %d\n", info->clockSource);
printk("Framing = %d\n", my_framing);
printk("Structure = %d\n", info->structure);
printk("interface = %d\n", info->interface);
printk("Coding = %d\n", info->coding);
printk("Line build out = %d\n", info->lineBuildOut);
printk("Equaliser = %d\n", info->equalizer);
printk("Transparent mode = %d\n",
info->transparentMode);
printk("Loop mode = %d\n", info->loopMode);
printk("Range = %d\n", info->range);
printk("Tx Buffer mode = %d\n", info->txBufferMode);
printk("Rx Buffer mode = %d\n", info->rxBufferMode);
printk("LOS Threshold = %d\n", info->losThreshold);
printk("Idle Code = %d\n", info->idleCode);
}
#endif
}
#if FST_DEBUG
if (info->valid & FSTVAL_DEBUG) {
fst_debug_mask = info->debug;
}
#endif
return err;
}
static void
gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
struct fstioc_info *info)
{
int i;
memset(info, 0, sizeof (struct fstioc_info));
i = port->index;
info->kernelVersion = LINUX_VERSION_CODE;
info->nports = card->nports;
info->type = card->type;
info->state = card->state;
info->proto = FST_GEN_HDLC;
info->index = i;
#if FST_DEBUG
info->debug = fst_debug_mask;
#endif
/* Only mark information as valid if card is running.
* Copy the data anyway in case it is useful for diagnostics
*/
info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
#if FST_DEBUG
| FSTVAL_DEBUG
#endif
;
info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
info->internalClock = FST_RDB(card, portConfig[i].internalClock);
info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
info->invertClock = FST_RDB(card, portConfig[i].invertClock);
info->v24IpSts = FST_RDL(card, v24IpSts[i]);
info->v24OpSts = FST_RDL(card, v24OpSts[i]);
info->clockStatus = FST_RDW(card, clockStatus[i]);
info->cableStatus = FST_RDW(card, cableStatus);
info->cardMode = FST_RDW(card, cardMode);
info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
/*
* The T2U can report cable presence for both A or B
* in bits 0 and 1 of cableStatus. See which port we are and
* do the mapping.
*/
if (card->family == FST_FAMILY_TXU) {
if (port->index == 0) {
/*
* Port A
*/
info->cableStatus = info->cableStatus & 1;
} else {
/*
* Port B
*/
info->cableStatus = info->cableStatus >> 1;
info->cableStatus = info->cableStatus & 1;
}
}
/*
* Some additional bits if we are TE1
*/
if (card->type == FST_TYPE_TE1) {
info->lineSpeed = FST_RDL(card, suConfig.dataRate);
info->clockSource = FST_RDB(card, suConfig.clocking);
info->framing = FST_RDB(card, suConfig.framing);
info->structure = FST_RDB(card, suConfig.structure);
info->interface = FST_RDB(card, suConfig.interface);
info->coding = FST_RDB(card, suConfig.coding);
info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
info->equalizer = FST_RDB(card, suConfig.equalizer);
info->loopMode = FST_RDB(card, suConfig.loopMode);
info->range = FST_RDB(card, suConfig.range);
info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
info->startingSlot = FST_RDB(card, suConfig.startingSlot);
info->losThreshold = FST_RDB(card, suConfig.losThreshold);
if (FST_RDB(card, suConfig.enableIdleCode))
info->idleCode = FST_RDB(card, suConfig.idleCode);
else
info->idleCode = 0;
info->receiveBufferDelay =
FST_RDL(card, suStatus.receiveBufferDelay);
info->framingErrorCount =
FST_RDL(card, suStatus.framingErrorCount);
info->codeViolationCount =
FST_RDL(card, suStatus.codeViolationCount);
info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
info->receiveRemoteAlarm =
FST_RDB(card, suStatus.receiveRemoteAlarm);
info->alarmIndicationSignal =
FST_RDB(card, suStatus.alarmIndicationSignal);
}
}
static int
fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
struct ifreq *ifr)
{
sync_serial_settings sync;
int i;
if (ifr->ifr_settings.size != sizeof (sync)) {
return -ENOMEM;
}
if (copy_from_user
(&sync, ifr->ifr_settings.ifs_ifsu.sync, sizeof (sync))) {
return -EFAULT;
}
if (sync.loopback)
return -EINVAL;
i = port->index;
switch (ifr->ifr_settings.type) {
case IF_IFACE_V35:
FST_WRW(card, portConfig[i].lineInterface, V35);
port->hwif = V35;
break;
case IF_IFACE_V24:
FST_WRW(card, portConfig[i].lineInterface, V24);
port->hwif = V24;
break;
case IF_IFACE_X21:
FST_WRW(card, portConfig[i].lineInterface, X21);
port->hwif = X21;
break;
case IF_IFACE_X21D:
FST_WRW(card, portConfig[i].lineInterface, X21D);
port->hwif = X21D;
break;
case IF_IFACE_T1:
FST_WRW(card, portConfig[i].lineInterface, T1);
port->hwif = T1;
break;
case IF_IFACE_E1:
FST_WRW(card, portConfig[i].lineInterface, E1);
port->hwif = E1;
break;
case IF_IFACE_SYNC_SERIAL:
break;
default:
return -EINVAL;
}
switch (sync.clock_type) {
case CLOCK_EXT:
FST_WRB(card, portConfig[i].internalClock, EXTCLK);
break;
case CLOCK_INT:
FST_WRB(card, portConfig[i].internalClock, INTCLK);
break;
default:
return -EINVAL;
}
FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
return 0;
}
static int
fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
struct ifreq *ifr)
{
sync_serial_settings sync;
	int i;
	/* Zero the structure up front: not every field is written below, so
	 * the copy_to_user() at the end would otherwise leak uninitialised
	 * kernel stack to user space.
	 */
	memset(&sync, 0, sizeof(sync));
/* First check what line type is set, we'll default to reporting X.21
* if nothing is set as IF_IFACE_SYNC_SERIAL implies it can't be
* changed
*/
switch (port->hwif) {
case E1:
ifr->ifr_settings.type = IF_IFACE_E1;
break;
case T1:
ifr->ifr_settings.type = IF_IFACE_T1;
break;
case V35:
ifr->ifr_settings.type = IF_IFACE_V35;
break;
case V24:
ifr->ifr_settings.type = IF_IFACE_V24;
break;
case X21D:
ifr->ifr_settings.type = IF_IFACE_X21D;
break;
case X21:
default:
ifr->ifr_settings.type = IF_IFACE_X21;
break;
}
if (ifr->ifr_settings.size == 0) {
return 0; /* only type requested */
}
if (ifr->ifr_settings.size < sizeof (sync)) {
return -ENOMEM;
}
i = port->index;
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
	/* Luckily the card and Linux use the same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sync, sizeof (sync))) {
return -EFAULT;
}
ifr->ifr_settings.size = sizeof (sync);
return 0;
}
static int
fst_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct fst_card_info *card;
struct fst_port_info *port;
struct fstioc_write wrthdr;
struct fstioc_info info;
unsigned long flags;
void *buf;
dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, ifr->ifr_data);
port = dev_to_port(dev);
card = port->card;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case FSTCPURESET:
fst_cpureset(card);
card->state = FST_RESET;
return 0;
case FSTCPURELEASE:
fst_cpurelease(card);
card->state = FST_STARTING;
return 0;
case FSTWRITE: /* Code write (download) */
/* First copy in the header with the length and offset of data
* to write
*/
if (ifr->ifr_data == NULL) {
return -EINVAL;
}
if (copy_from_user(&wrthdr, ifr->ifr_data,
sizeof (struct fstioc_write))) {
return -EFAULT;
}
		/* Sanity check the parameters. We reject, rather than
		 * truncate, writes that would run past the end of the
		 * shared memory window.
		 */
if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
wrthdr.size + wrthdr.offset > FST_MEMSIZE) {
return -ENXIO;
}
/* Now copy the data to the card. */
buf = memdup_user(ifr->ifr_data + sizeof(struct fstioc_write),
wrthdr.size);
if (IS_ERR(buf))
return PTR_ERR(buf);
memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
kfree(buf);
/* Writes to the memory of a card in the reset state constitute
* a download
*/
if (card->state == FST_RESET) {
card->state = FST_DOWNLOAD;
}
return 0;
case FSTGETCONF:
/* If card has just been started check the shared memory config
* version and marker
*/
if (card->state == FST_STARTING) {
check_started_ok(card);
/* If everything checked out enable card interrupts */
if (card->state == FST_RUNNING) {
spin_lock_irqsave(&card->card_lock, flags);
fst_enable_intr(card);
FST_WRB(card, interruptHandshake, 0xEE);
spin_unlock_irqrestore(&card->card_lock, flags);
}
}
if (ifr->ifr_data == NULL) {
return -EINVAL;
}
gather_conf_info(card, port, &info);
if (copy_to_user(ifr->ifr_data, &info, sizeof (info))) {
return -EFAULT;
}
return 0;
case FSTSETCONF:
/*
* Most of the settings have been moved to the generic ioctls
* this just covers debug and board ident now
*/
if (card->state != FST_RUNNING) {
pr_err("Attempt to configure card %d in non-running state (%d)\n",
card->card_no, card->state);
return -EIO;
}
if (copy_from_user(&info, ifr->ifr_data, sizeof (info))) {
return -EFAULT;
}
return set_conf_from_info(card, port, &info);
case SIOCWANDEV:
switch (ifr->ifr_settings.type) {
case IF_GET_IFACE:
return fst_get_iface(card, port, ifr);
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
case IF_IFACE_V24:
case IF_IFACE_X21:
case IF_IFACE_X21D:
case IF_IFACE_T1:
case IF_IFACE_E1:
return fst_set_iface(card, port, ifr);
case IF_PROTO_RAW:
port->mode = FST_RAW;
return 0;
case IF_GET_PROTO:
if (port->mode == FST_RAW) {
ifr->ifr_settings.type = IF_PROTO_RAW;
return 0;
}
return hdlc_ioctl(dev, ifr, cmd);
default:
port->mode = FST_GEN_HDLC;
dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
ifr->ifr_settings.type);
return hdlc_ioctl(dev, ifr, cmd);
}
default:
/* Not one of ours. Pass through to HDLC package */
return hdlc_ioctl(dev, ifr, cmd);
}
}
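/* Illustrative user-space sequence for the private FSTGETCONF ioctl
 * (the fd and interface name here are hypothetical):
 *	struct ifreq ifr;
 *	struct fstioc_info info;
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_data = (char *) &info;
 *	ioctl(fd, FSTGETCONF, &ifr);
 * after which info.state, info.v24IpSts and friends describe the port.
 */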
static void
fst_openport(struct fst_port_info *port)
{
int signals;
int txq_length;
/* Only init things if card is actually running. This allows open to
* succeed for downloads etc.
*/
if (port->card->state == FST_RUNNING) {
if (port->run) {
dbg(DBG_OPEN, "open: found port already running\n");
fst_issue_cmd(port, STOPPORT);
port->run = 0;
}
fst_rx_config(port);
fst_tx_config(port);
fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
fst_issue_cmd(port, STARTPORT);
port->run = 1;
signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
if (signals & (((port->hwif == X21) || (port->hwif == X21D))
? IPSTS_INDICATE : IPSTS_DCD))
netif_carrier_on(port_to_dev(port));
else
netif_carrier_off(port_to_dev(port));
txq_length = port->txqe - port->txqs;
port->txqe = 0;
port->txqs = 0;
}
}
static void
fst_closeport(struct fst_port_info *port)
{
if (port->card->state == FST_RUNNING) {
if (port->run) {
port->run = 0;
fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);
fst_issue_cmd(port, STOPPORT);
} else {
dbg(DBG_OPEN, "close: port not running\n");
}
}
}
static int
fst_open(struct net_device *dev)
{
int err;
struct fst_port_info *port;
port = dev_to_port(dev);
if (!try_module_get(THIS_MODULE))
return -EBUSY;
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
if (err) {
module_put(THIS_MODULE);
return err;
}
}
fst_openport(port);
netif_wake_queue(dev);
return 0;
}
static int
fst_close(struct net_device *dev)
{
struct fst_port_info *port;
struct fst_card_info *card;
unsigned char tx_dma_done;
unsigned char rx_dma_done;
port = dev_to_port(dev);
card = port->card;
tx_dma_done = inb(card->pci_conf + DMACSR1);
rx_dma_done = inb(card->pci_conf + DMACSR0);
dbg(DBG_OPEN,
"Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress,
rx_dma_done);
netif_stop_queue(dev);
fst_closeport(dev_to_port(dev));
if (port->mode != FST_RAW) {
hdlc_close(dev);
}
module_put(THIS_MODULE);
return 0;
}
static int
fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
{
/*
* Setting currently fixed in FarSync card so we check and forget
*/
if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
return 0;
}
static void
fst_tx_timeout(struct net_device *dev)
{
struct fst_port_info *port;
struct fst_card_info *card;
port = dev_to_port(dev);
card = port->card;
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
dbg(DBG_ASS, "Tx timeout card %d port %d\n",
card->card_no, port->index);
fst_issue_cmd(port, ABORTTX);
dev->trans_start = jiffies;
netif_wake_queue(dev);
port->start = 0;
}
static netdev_tx_t
fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fst_card_info *card;
struct fst_port_info *port;
unsigned long flags;
int txq_length;
port = dev_to_port(dev);
card = port->card;
dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
/* Drop packet with error if we don't have carrier */
if (!netif_carrier_ok(dev)) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
dbg(DBG_ASS,
"Tried to transmit but no carrier on card %d port %d\n",
card->card_no, port->index);
return NETDEV_TX_OK;
}
	/* Drop it if it's too big! Probably an MTU failure */
if (skb->len > LEN_TX_BUFFER) {
dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
LEN_TX_BUFFER);
dev_kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
/*
* We are always going to queue the packet
* so that the bottom half is the only place we tx from
* Check there is room in the port txq
*/
spin_lock_irqsave(&card->card_lock, flags);
if ((txq_length = port->txqe - port->txqs) < 0) {
/*
* This is the case where the next free has wrapped but the
* last used hasn't
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > fst_txq_high) {
/*
* We have got enough buffers in the pipeline. Ask the network
* layer to stop sending frames down
*/
netif_stop_queue(dev);
port->start = 1; /* I'm using this to signal stop sent up */
}
if (txq_length == FST_TXQ_DEPTH - 1) {
/*
* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
card->card_no, port->index);
return NETDEV_TX_OK;
}
/*
* queue the buffer
*/
spin_lock_irqsave(&card->card_lock, flags);
port->txq[port->txqe] = skb;
port->txqe++;
if (port->txqe == FST_TXQ_DEPTH)
port->txqe = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
	/* Schedule the bottom half which now does transmit processing */
fst_q_work_item(&fst_work_txq, card->card_no);
tasklet_schedule(&fst_tx_task);
return NETDEV_TX_OK;
}
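/* Flow control summary (illustrative): fst_start_xmit() stops the queue
 * once more than fst_txq_high frames are pending and flags this in
 * port->start; do_bottom_half_tx() wakes the queue again when the
 * backlog drains below fst_txq_low.
 */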
/*
* Card setup having checked hardware resources.
* Should be pretty bizarre if we get an error here (kernel memory
* exhaustion is one possibility). If we do see a problem we report it
* via a printk and leave the corresponding interface and all that follow
* disabled.
*/
static char *type_strings[] = {
"no hardware", /* Should never be seen */
"FarSync T2P",
"FarSync T4P",
"FarSync T1U",
"FarSync T2U",
"FarSync T4U",
"FarSync TE1"
};
static void
fst_init_card(struct fst_card_info *card)
{
int i;
int err;
	/* The number of ports we work on is derived from the card ID. If the
	 * firmware later reports something different (which should never
	 * happen) we would have to revise this.
	 */
for (i = 0; i < card->nports; i++) {
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
int j;
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
i, -err);
for (j = i; j < card->nports; j++) {
free_netdev(card->ports[j].dev);
card->ports[j].dev = NULL;
}
card->nports = i;
break;
}
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
}
static const struct net_device_ops fst_ops = {
.ndo_open = fst_open,
.ndo_stop = fst_close,
.ndo_change_mtu = hdlc_change_mtu,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_do_ioctl = fst_ioctl,
.ndo_tx_timeout = fst_tx_timeout,
};
/*
* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
*/
static int
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int no_of_cards_added = 0;
struct fst_card_info *card;
int err = 0;
int i;
printk_once(KERN_INFO
pr_fmt("FarSync WAN driver " FST_USER_VERSION
" (c) 2001-2004 FarSite Communications Ltd.\n"));
#if FST_DEBUG
dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
#endif
/*
* We are going to be clever and allow certain cards not to be
* configured. An exclude list can be provided in /etc/modules.conf
*/
if (fst_excluded_cards != 0) {
/*
* There are cards to exclude
*
*/
for (i = 0; i < fst_excluded_cards; i++) {
if ((pdev->devfn) >> 3 == fst_excluded_list[i]) {
pr_info("FarSync PCI device %d not assigned\n",
(pdev->devfn) >> 3);
return -EBUSY;
}
}
}
/* Allocate driver private data */
card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
if (card == NULL)
return -ENOMEM;
/* Try to enable the device */
if ((err = pci_enable_device(pdev)) != 0) {
pr_err("Failed to enable card. Err %d\n", -err);
kfree(card);
return err;
}
if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
pr_err("Failed to allocate regions. Err %d\n", -err);
pci_disable_device(pdev);
kfree(card);
return err;
}
/* Get virtual addresses of memory regions */
card->pci_conf = pci_resource_start(pdev, 1);
card->phys_mem = pci_resource_start(pdev, 2);
card->phys_ctlmem = pci_resource_start(pdev, 3);
if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
pr_err("Physical memory remap failed\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(card);
return -ENODEV;
}
if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
pr_err("Control memory remap failed\n");
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->mem);
kfree(card);
return -ENODEV;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENODEV;
}
/* Record info we need */
card->irq = pdev->irq;
card->type = ent->driver_data;
card->family = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T4P))
? FST_FAMILY_TXP : FST_FAMILY_TXU;
if ((ent->driver_data == FST_TYPE_T1U) ||
(ent->driver_data == FST_TYPE_TE1))
card->nports = 1;
else
card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
card->state = FST_UNINIT;
spin_lock_init ( &card->card_lock );
for ( i = 0 ; i < card->nports ; i++ ) {
struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
hdlc_device *hdlc;
if (!dev) {
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
free_irq(card->irq, card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENODEV;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
card->ports[i].index = i;
card->ports[i].run = 0;
hdlc = dev_to_hdlc(dev);
/* Fill in the net device info */
/* Since this is a PCI setup this is purely
* informational. Give them the buffer addresses
* and basic card I/O.
*/
dev->mem_start = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
+ BUF_OFFSET ( txBuffer[i][NUM_TX_BUFFER][0]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
dev->netdev_ops = &fst_ops;
dev->tx_queue_len = FST_TX_QUEUE_LEN;
dev->watchdog_timeo = FST_TX_TIMEOUT;
hdlc->attach = fst_attach;
hdlc->xmit = fst_start_xmit;
}
card->device = pdev;
dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type,
card->nports, card->irq);
dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
card->pci_conf, card->phys_mem, card->phys_ctlmem);
/* Reset the card's processor */
fst_cpureset(card);
card->state = FST_RESET;
/* Initialise DMA (if required) */
fst_init_dma(card);
/* Record driver data for later use */
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
fst_init_card(card);
if (card->family == FST_FAMILY_TXU) {
/*
* Allocate a dma buffer for transmit and receives
*/
card->rx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->rx_dma_handle_card);
if (card->rx_dma_handle_host == NULL) {
pr_err("Could not allocate rx dma buffer\n");
fst_disable_intr(card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENOMEM;
}
card->tx_dma_handle_host =
pci_alloc_consistent(card->device, FST_MAX_MTU,
&card->tx_dma_handle_card);
if (card->tx_dma_handle_host == NULL) {
pr_err("Could not allocate tx dma buffer\n");
fst_disable_intr(card);
pci_release_regions(pdev);
pci_disable_device(pdev);
iounmap(card->ctlmem);
iounmap(card->mem);
kfree(card);
return -ENOMEM;
}
}
return 0; /* Success */
}
/*
* Cleanup and close down a card
*/
static void
fst_remove_one(struct pci_dev *pdev)
{
struct fst_card_info *card;
int i;
card = pci_get_drvdata(pdev);
for (i = 0; i < card->nports; i++) {
struct net_device *dev = port_to_dev(&card->ports[i]);
unregister_hdlc_device(dev);
}
fst_disable_intr(card);
free_irq(card->irq, card);
iounmap(card->ctlmem);
iounmap(card->mem);
pci_release_regions(pdev);
if (card->family == FST_FAMILY_TXU) {
/*
* Free dma buffers
*/
pci_free_consistent(card->device, FST_MAX_MTU,
card->rx_dma_handle_host,
card->rx_dma_handle_card);
pci_free_consistent(card->device, FST_MAX_MTU,
card->tx_dma_handle_host,
card->tx_dma_handle_card);
}
fst_card_array[card->card_no] = NULL;
}
static struct pci_driver fst_driver = {
.name = FST_NAME,
.id_table = fst_pci_dev_id,
.probe = fst_add_one,
.remove = fst_remove_one,
.suspend = NULL,
.resume = NULL,
};
static int __init
fst_init(void)
{
int i;
for (i = 0; i < FST_MAX_CARDS; i++)
fst_card_array[i] = NULL;
spin_lock_init(&fst_work_q_lock);
return pci_register_driver(&fst_driver);
}
static void __exit
fst_cleanup_module(void)
{
pr_info("FarSync WAN driver unloading\n");
pci_unregister_driver(&fst_driver);
}
module_init(fst_init);
module_exit(fst_cleanup_module);
| ./CrossVul/dataset_final_sorted/CWE-399/c/bad_2058_0 |
crossvul-cpp_data_bad_1774_0 | /*
* Kernel-based Virtual Machine driver for Linux
*
* AMD SVM support
*
* Copyright (C) 2006 Qumranet, Inc.
* Copyright 2010 Red Hat, Inc. and/or its affiliates.
*
* Authors:
* Yaniv Kamay <yaniv@qumranet.com>
* Avi Kivity <avi@qumranet.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include <linux/kvm_host.h>
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/virtext.h>
#include "trace.h"
#define __ex(x) __kvm_handle_fault_on_reboot(x)
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
static const struct x86_cpu_id svm_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_SVM),
{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
#define SVM_FEATURE_NPT (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)
#define SVM_FEATURE_NRIP (1 << 3)
#define SVM_FEATURE_TSC_RATE (1 << 4)
#define SVM_FEATURE_VMCB_CLEAN (1 << 5)
#define SVM_FEATURE_FLUSH_ASID (1 << 6)
#define SVM_FEATURE_DECODE_ASSIST (1 << 7)
#define SVM_FEATURE_PAUSE_FILTER (1 << 10)
#define NESTED_EXIT_HOST 0 /* Exit handled on host level */
#define NESTED_EXIT_DONE 1 /* Exit caused nested vmexit */
#define NESTED_EXIT_CONTINUE 2 /* Further checks needed */
#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
#define TSC_RATIO_RSVD 0xffffff0000000000ULL
#define TSC_RATIO_MIN 0x0000000000000001ULL
#define TSC_RATIO_MAX 0x000000ffffffffffULL
static bool erratum_383_found __read_mostly;
static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
MSR_FS_BASE,
#endif
MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
struct kvm_vcpu;
struct nested_state {
struct vmcb *hsave;
u64 hsave_msr;
u64 vm_cr_msr;
u64 vmcb;
/* These are the merged vectors */
u32 *msrpm;
/* gpa pointers to the real vectors */
u64 vmcb_msrpm;
u64 vmcb_iopm;
/* A VMEXIT is required but not yet emulated */
bool exit_required;
/* cache for intercepts of the guest */
u32 intercept_cr;
u32 intercept_dr;
u32 intercept_exceptions;
u64 intercept;
/* Nested Paging related state */
u64 nested_cr3;
};
#define MSRPM_OFFSETS 16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
/*
* Set osvw_len to higher value when updated Revision Guides
* are published and we know what the new status bits are
*/
static uint64_t osvw_len = 4, osvw_status;
struct vcpu_svm {
struct kvm_vcpu vcpu;
struct vmcb *vmcb;
unsigned long vmcb_pa;
struct svm_cpu_data *svm_data;
uint64_t asid_generation;
uint64_t sysenter_esp;
uint64_t sysenter_eip;
u64 next_rip;
u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
struct {
u16 fs;
u16 gs;
u16 ldt;
u64 gs_base;
} host;
u32 *msrpm;
ulong nmi_iret_rip;
struct nested_state nested;
bool nmi_singlestep;
unsigned int3_injected;
unsigned long int3_rip;
u32 apf_reason;
/* cached guest cpuid flags for faster access */
bool nrips_enabled : 1;
};
static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT 0x0100000000ULL
#define MSR_INVALID 0xffffffffU
static const struct svm_direct_access_msrs {
u32 index; /* Index of the MSR */
bool always; /* True if intercept is always on */
} direct_access_msrs[] = {
{ .index = MSR_STAR, .always = true },
{ .index = MSR_IA32_SYSENTER_CS, .always = true },
#ifdef CONFIG_X86_64
{ .index = MSR_GS_BASE, .always = true },
{ .index = MSR_FS_BASE, .always = true },
{ .index = MSR_KERNEL_GS_BASE, .always = true },
{ .index = MSR_LSTAR, .always = true },
{ .index = MSR_CSTAR, .always = true },
{ .index = MSR_SYSCALL_MASK, .always = true },
#endif
{ .index = MSR_IA32_LASTBRANCHFROMIP, .always = false },
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_INVALID, .always = false },
};
/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif
/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);
/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);
static void svm_complete_interrupts(struct vcpu_svm *svm);
static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
enum {
VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
pause filter count */
VMCB_PERM_MAP, /* IOPM Base and MSRPM Base */
VMCB_ASID, /* ASID */
VMCB_INTR, /* int_ctl, int_vector */
VMCB_NPT, /* npt_en, nCR3, gPAT */
VMCB_CR, /* CR0, CR3, CR4, EFER */
VMCB_DR, /* DR6, DR7 */
VMCB_DT, /* GDT, IDT */
VMCB_SEG, /* CS, DS, SS, ES, CPL */
VMCB_CR2, /* CR2 only */
VMCB_LBR, /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
VMCB_DIRTY_MAX,
};
/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK ((1U << VMCB_INTR) | (1U << VMCB_CR2))
static inline void mark_all_dirty(struct vmcb *vmcb)
{
vmcb->control.clean = 0;
}
static inline void mark_all_clean(struct vmcb *vmcb)
{
vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
& ~VMCB_ALWAYS_DIRTY_MASK;
}
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
vmcb->control.clean &= ~(1 << bit);
}
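/* Example (illustrative): after changing a control register in the VMCB,
 * e.g. in svm_set_efer() below, one does
 *	mark_dirty(svm->vmcb, VMCB_CR);
 * so the clean bit for that state group is dropped and the hardware
 * reloads it on the next VMRUN.
 */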
static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
return container_of(vcpu, struct vcpu_svm, vcpu);
}
static void recalc_intercepts(struct vcpu_svm *svm)
{
struct vmcb_control_area *c, *h;
struct nested_state *g;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
if (!is_guest_mode(&svm->vcpu))
return;
c = &svm->vmcb->control;
h = &svm->nested.hsave->control;
g = &svm->nested;
c->intercept_cr = h->intercept_cr | g->intercept_cr;
c->intercept_dr = h->intercept_dr | g->intercept_dr;
c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
c->intercept = h->intercept | g->intercept;
}
static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
if (is_guest_mode(&svm->vcpu))
return svm->nested.hsave;
else
return svm->vmcb;
}
static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_cr |= (1U << bit);
recalc_intercepts(svm);
}
static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_cr &= ~(1U << bit);
recalc_intercepts(svm);
}
static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
return vmcb->control.intercept_cr & (1U << bit);
}
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
| (1 << INTERCEPT_DR1_READ)
| (1 << INTERCEPT_DR2_READ)
| (1 << INTERCEPT_DR3_READ)
| (1 << INTERCEPT_DR4_READ)
| (1 << INTERCEPT_DR5_READ)
| (1 << INTERCEPT_DR6_READ)
| (1 << INTERCEPT_DR7_READ)
| (1 << INTERCEPT_DR0_WRITE)
| (1 << INTERCEPT_DR1_WRITE)
| (1 << INTERCEPT_DR2_WRITE)
| (1 << INTERCEPT_DR3_WRITE)
| (1 << INTERCEPT_DR4_WRITE)
| (1 << INTERCEPT_DR5_WRITE)
| (1 << INTERCEPT_DR6_WRITE)
| (1 << INTERCEPT_DR7_WRITE);
recalc_intercepts(svm);
}
static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_dr = 0;
recalc_intercepts(svm);
}
static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_exceptions |= (1U << bit);
recalc_intercepts(svm);
}
static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept_exceptions &= ~(1U << bit);
recalc_intercepts(svm);
}
static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept |= (1ULL << bit);
recalc_intercepts(svm);
}
static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
struct vmcb *vmcb = get_host_vmcb(svm);
vmcb->control.intercept &= ~(1ULL << bit);
recalc_intercepts(svm);
}
static inline void enable_gif(struct vcpu_svm *svm)
{
svm->vcpu.arch.hflags |= HF_GIF_MASK;
}
static inline void disable_gif(struct vcpu_svm *svm)
{
svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}
static inline bool gif_set(struct vcpu_svm *svm)
{
return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}
static unsigned long iopm_base;
struct kvm_ldttss_desc {
u16 limit0;
u16 base0;
unsigned base1:8, type:5, dpl:2, p:1;
unsigned limit1:4, zero0:3, g:1, base2:8;
u32 base3;
u32 zero1;
} __attribute__((packed));
struct svm_cpu_data {
int cpu;
u64 asid_generation;
u32 max_asid;
u32 next_asid;
struct kvm_ldttss_desc *tss_desc;
struct page *save_area;
};
static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
struct svm_init_data {
int cpu;
int r;
};
static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)
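/*
 * The MSR permission map assigns two bits to every MSR (read and
 * write intercept), grouped into 2048-byte blocks that each cover a
 * range of 8192 MSRs. svm_msrpm_offset() maps an MSR number to its
 * offset into the bitmap, in u32 units.
 */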
static u32 svm_msrpm_offset(u32 msr)
{
u32 offset;
int i;
for (i = 0; i < NUM_MSR_MAPS; i++) {
if (msr < msrpm_ranges[i] ||
msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
continue;
offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
offset += (i * MSRS_RANGE_SIZE); /* add range offset */
/* Now we have the u8 offset - but need the u32 offset */
return offset / 4;
}
/* MSR not in any range */
return MSR_INVALID;
}
#define MAX_INST_SIZE 15
static inline void clgi(void)
{
asm volatile (__ex(SVM_CLGI));
}
static inline void stgi(void)
{
asm volatile (__ex(SVM_STGI));
}
static inline void invlpga(unsigned long addr, u32 asid)
{
asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}
static int get_npt_level(void)
{
#ifdef CONFIG_X86_64
return PT64_ROOT_LEVEL;
#else
return PT32E_ROOT_LEVEL;
#endif
}
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
vcpu->arch.efer = efer;
if (!npt_enabled && !(efer & EFER_LMA))
efer &= ~EFER_LME;
to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}
static int is_external_interrupt(u32 info)
{
info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}
static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 ret = 0;
if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
return ret;
}
static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (mask == 0)
svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
else
svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;
}
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.next_rip != 0) {
WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
svm->next_rip = svm->vmcb->control.next_rip;
}
if (!svm->next_rip) {
if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
EMULATE_DONE)
printk(KERN_DEBUG "%s: NOP\n", __func__);
return;
}
if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
__func__, kvm_rip_read(vcpu), svm->next_rip);
kvm_rip_write(vcpu, svm->next_rip);
svm_set_interrupt_shadow(vcpu, 0);
}
static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
bool has_error_code, u32 error_code,
bool reinject)
{
struct vcpu_svm *svm = to_svm(vcpu);
/*
* If we are within a nested VM we'd better #VMEXIT and let the guest
* handle the exception
*/
if (!reinject &&
nested_svm_check_exception(svm, nr, has_error_code, error_code))
return;
if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);
/*
* For guest debugging where we have to reinject #BP if some
* INT3 is guest-owned:
* Emulate nRIP by moving RIP forward. Will fail if injection
* raises a fault that is not intercepted. Still better than
* failing in all cases.
*/
skip_emulated_instruction(&svm->vcpu);
rip = kvm_rip_read(&svm->vcpu);
svm->int3_rip = rip + svm->vmcb->save.cs.base;
svm->int3_injected = rip - old_rip;
}
svm->vmcb->control.event_inj = nr
| SVM_EVTINJ_VALID
| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
| SVM_EVTINJ_TYPE_EXEPT;
svm->vmcb->control.event_inj_err = error_code;
}
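/*
 * Erratum 383 workaround: per AMD's guidance, setting bit 47 of
 * MSR_AMD64_DC_CFG avoids the TLB multi-match condition that would
 * otherwise let a guest trigger a fatal machine check on the host
 * (see is_erratum_383() below for the detection side).
 */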
static void svm_init_erratum_383(void)
{
u32 low, high;
int err;
u64 val;
if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
return;
/* Use _safe variants to not break nested virtualization */
val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
if (err)
return;
val |= (1ULL << 47);
low = lower_32_bits(val);
high = upper_32_bits(val);
native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);
erratum_383_found = true;
}
static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
/*
* Guests should see errata 400 and 415 as fixed (assuming that
* HLT and IO instructions are intercepted).
*/
vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
vcpu->arch.osvw.status = osvw_status & ~(6ULL);
/*
* By increasing VCPU's osvw.length to 3 we are telling the guest that
* all osvw.status bits inside that length, including bit 0 (which is
* reserved for erratum 298), are valid. However, if host processor's
* osvw_len is 0 then osvw_status[0] carries no information. We need to
* be conservative here and therefore we tell the guest that erratum 298
* is present (because we really don't know).
*/
if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
vcpu->arch.osvw.status |= 1;
}
static int has_svm(void)
{
const char *msg;
if (!cpu_has_svm(&msg)) {
printk(KERN_INFO "has_svm: %s\n", msg);
return 0;
}
return 1;
}
static void svm_hardware_disable(void)
{
/* Make sure we clean up behind us */
if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
cpu_svm_disable();
amd_pmu_disable_virt();
}
static int svm_hardware_enable(void)
{
struct svm_cpu_data *sd;
uint64_t efer;
struct desc_ptr gdt_descr;
struct desc_struct *gdt;
int me = raw_smp_processor_id();
rdmsrl(MSR_EFER, efer);
if (efer & EFER_SVME)
return -EBUSY;
if (!has_svm()) {
pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
return -EINVAL;
}
sd = per_cpu(svm_data, me);
if (!sd) {
pr_err("%s: svm_data is NULL on %d\n", __func__, me);
return -EINVAL;
}
sd->asid_generation = 1;
sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
sd->next_asid = sd->max_asid + 1;
native_store_gdt(&gdt_descr);
gdt = (struct desc_struct *)gdt_descr.address;
sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
wrmsrl(MSR_EFER, efer | EFER_SVME);
wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
}
/*
* Get OSVW bits.
*
* Note that it is possible to have a system with mixed processor
* revisions and therefore different OSVW bits. If bits are not the same
* on different processors then choose the worst case (i.e. if erratum
* is present on one processor and not on another then assume that the
* erratum is present everywhere).
*/
if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
uint64_t len, status = 0;
int err;
len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
if (!err)
status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
&err);
if (err)
osvw_status = osvw_len = 0;
else {
if (len < osvw_len)
osvw_len = len;
osvw_status |= status;
osvw_status &= (1ULL << osvw_len) - 1;
}
} else
osvw_status = osvw_len = 0;
svm_init_erratum_383();
amd_pmu_enable_virt();
return 0;
}
static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
	if (!sd)
		return;
	per_cpu(svm_data, cpu) = NULL;
__free_page(sd->save_area);
kfree(sd);
}
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
int r;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
sd->save_area = alloc_page(GFP_KERNEL);
r = -ENOMEM;
if (!sd->save_area)
goto err_1;
per_cpu(svm_data, cpu) = sd;
return 0;
err_1:
kfree(sd);
return r;
}
static bool valid_msr_intercept(u32 index)
{
int i;
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
if (direct_access_msrs[i].index == index)
return true;
return false;
}
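/*
 * Each u32 of the permission map covers 16 consecutive MSRs: bit
 * 2 * (msr & 0xf) intercepts reads and the following bit intercepts
 * writes. A clear bit grants the guest direct access to the MSR.
 */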
static void set_msr_interception(u32 *msrpm, unsigned msr,
int read, int write)
{
u8 bit_read, bit_write;
unsigned long tmp;
u32 offset;
/*
	 * If this warning triggers, extend the direct_access_msrs list at
	 * the beginning of the file.
*/
WARN_ON(!valid_msr_intercept(msr));
offset = svm_msrpm_offset(msr);
bit_read = 2 * (msr & 0x0f);
bit_write = 2 * (msr & 0x0f) + 1;
	BUG_ON(offset == MSR_INVALID);
	tmp = msrpm[offset];
read ? clear_bit(bit_read, &tmp) : set_bit(bit_read, &tmp);
write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);
msrpm[offset] = tmp;
}
static void svm_vcpu_init_msrpm(u32 *msrpm)
{
int i;
memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
if (!direct_access_msrs[i].always)
continue;
set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
}
}
static void add_msr_offset(u32 offset)
{
int i;
for (i = 0; i < MSRPM_OFFSETS; ++i) {
/* Offset already in list? */
if (msrpm_offsets[i] == offset)
return;
/* Slot used by another offset? */
if (msrpm_offsets[i] != MSR_INVALID)
continue;
/* Add offset to list */
msrpm_offsets[i] = offset;
return;
}
/*
	 * If this BUG triggers, the msrpm_offsets table has overflowed.
	 * Just increase MSRPM_OFFSETS in this case.
*/
BUG();
}
static void init_msrpm_offsets(void)
{
int i;
memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));
for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
u32 offset;
offset = svm_msrpm_offset(direct_access_msrs[i].index);
BUG_ON(offset == MSR_INVALID);
add_msr_offset(offset);
}
}
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
svm->vmcb->control.lbr_ctl = 1;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}
static void svm_disable_lbrv(struct vcpu_svm *svm)
{
u32 *msrpm = svm->msrpm;
svm->vmcb->control.lbr_ctl = 0;
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}
static __init int svm_hardware_setup(void)
{
int cpu;
struct page *iopm_pages;
void *iopm_va;
int r;
iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
if (!iopm_pages)
return -ENOMEM;
iopm_va = page_address(iopm_pages);
memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
init_msrpm_offsets();
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
kvm_enable_efer_bits(EFER_FFXSR);
if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
kvm_has_tsc_control = true;
kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
kvm_tsc_scaling_ratio_frac_bits = 32;
}
if (nested) {
printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
}
for_each_possible_cpu(cpu) {
r = svm_cpu_init(cpu);
if (r)
goto err;
}
if (!boot_cpu_has(X86_FEATURE_NPT))
npt_enabled = false;
if (npt_enabled && !npt) {
printk(KERN_INFO "kvm: Nested Paging disabled\n");
npt_enabled = false;
}
if (npt_enabled) {
printk(KERN_INFO "kvm: Nested Paging enabled\n");
kvm_enable_tdp();
} else
kvm_disable_tdp();
return 0;
err:
__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
iopm_base = 0;
return r;
}
static __exit void svm_hardware_unsetup(void)
{
int cpu;
for_each_possible_cpu(cpu)
svm_cpu_uninit(cpu);
__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
iopm_base = 0;
}
static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
seg->limit = 0xffff;
seg->base = 0;
}
static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
seg->selector = 0;
seg->attrib = SVM_SELECTOR_P_MASK | type;
seg->limit = 0xffff;
seg->base = 0;
}
static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return svm->vmcb->control.tsc_offset;
}
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 g_tsc_offset = 0;
if (is_guest_mode(vcpu)) {
g_tsc_offset = svm->vmcb->control.tsc_offset -
svm->nested.hsave->control.tsc_offset;
svm->nested.hsave->control.tsc_offset = offset;
} else
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
svm->vmcb->control.tsc_offset,
offset);
svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
static void svm_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.tsc_offset += adjustment;
if (is_guest_mode(vcpu))
svm->nested.hsave->control.tsc_offset += adjustment;
else
trace_kvm_write_tsc_offset(vcpu->vcpu_id,
svm->vmcb->control.tsc_offset - adjustment,
svm->vmcb->control.tsc_offset);
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}
static void init_vmcb(struct vcpu_svm *svm)
{
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
svm->vcpu.fpu_active = 1;
svm->vcpu.arch.hflags = 0;
set_cr_intercept(svm, INTERCEPT_CR0_READ);
set_cr_intercept(svm, INTERCEPT_CR3_READ);
set_cr_intercept(svm, INTERCEPT_CR4_READ);
set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
set_exception_intercept(svm, PF_VECTOR);
set_exception_intercept(svm, UD_VECTOR);
set_exception_intercept(svm, MC_VECTOR);
set_exception_intercept(svm, AC_VECTOR);
set_intercept(svm, INTERCEPT_INTR);
set_intercept(svm, INTERCEPT_NMI);
set_intercept(svm, INTERCEPT_SMI);
set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
set_intercept(svm, INTERCEPT_RDPMC);
set_intercept(svm, INTERCEPT_CPUID);
set_intercept(svm, INTERCEPT_INVD);
set_intercept(svm, INTERCEPT_HLT);
set_intercept(svm, INTERCEPT_INVLPG);
set_intercept(svm, INTERCEPT_INVLPGA);
set_intercept(svm, INTERCEPT_IOIO_PROT);
set_intercept(svm, INTERCEPT_MSR_PROT);
set_intercept(svm, INTERCEPT_TASK_SWITCH);
set_intercept(svm, INTERCEPT_SHUTDOWN);
set_intercept(svm, INTERCEPT_VMRUN);
set_intercept(svm, INTERCEPT_VMMCALL);
set_intercept(svm, INTERCEPT_VMLOAD);
set_intercept(svm, INTERCEPT_VMSAVE);
set_intercept(svm, INTERCEPT_STGI);
set_intercept(svm, INTERCEPT_CLGI);
set_intercept(svm, INTERCEPT_SKINIT);
set_intercept(svm, INTERCEPT_WBINVD);
set_intercept(svm, INTERCEPT_MONITOR);
set_intercept(svm, INTERCEPT_MWAIT);
set_intercept(svm, INTERCEPT_XSETBV);
control->iopm_base_pa = iopm_base;
control->msrpm_base_pa = __pa(svm->msrpm);
control->int_ctl = V_INTR_MASKING_MASK;
init_seg(&save->es);
init_seg(&save->ss);
init_seg(&save->ds);
init_seg(&save->fs);
init_seg(&save->gs);
save->cs.selector = 0xf000;
save->cs.base = 0xffff0000;
/* Executable/Readable Code Segment */
save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
save->cs.limit = 0xffff;
save->gdtr.limit = 0xffff;
save->idtr.limit = 0xffff;
init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
svm_set_efer(&svm->vcpu, 0);
save->dr6 = 0xffff0ff0;
kvm_set_rflags(&svm->vcpu, 2);
save->rip = 0x0000fff0;
svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
/*
* svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
* It also updates the guest-visible cr0 value.
*/
svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
kvm_mmu_reset_context(&svm->vcpu);
save->cr4 = X86_CR4_PAE;
/* rdx = ?? */
if (npt_enabled) {
/* Setup VMCB for Nested Paging */
control->nested_ctl = 1;
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, PF_VECTOR);
clr_cr_intercept(svm, INTERCEPT_CR3_READ);
clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
save->cr3 = 0;
save->cr4 = 0;
}
svm->asid_generation = 0;
svm->nested.vmcb = 0;
svm->vcpu.arch.hflags = 0;
if (boot_cpu_has(X86_FEATURE_PAUSEFILTER)) {
control->pause_filter_count = 3000;
set_intercept(svm, INTERCEPT_PAUSE);
}
mark_all_dirty(svm->vmcb);
enable_gif(svm);
}
static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 dummy;
u32 eax = 1;
if (!init_event) {
svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
MSR_IA32_APICBASE_ENABLE;
if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
}
init_vmcb(svm);
kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy);
kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
}
static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
struct vcpu_svm *svm;
struct page *page;
struct page *msrpm_pages;
struct page *hsave_page;
struct page *nested_msrpm_pages;
int err;
svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!svm) {
err = -ENOMEM;
goto out;
}
err = kvm_vcpu_init(&svm->vcpu, kvm, id);
if (err)
goto free_svm;
err = -ENOMEM;
page = alloc_page(GFP_KERNEL);
if (!page)
goto uninit;
msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!msrpm_pages)
goto free_page1;
nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
if (!nested_msrpm_pages)
goto free_page2;
hsave_page = alloc_page(GFP_KERNEL);
if (!hsave_page)
goto free_page3;
svm->nested.hsave = page_address(hsave_page);
svm->msrpm = page_address(msrpm_pages);
svm_vcpu_init_msrpm(svm->msrpm);
svm->nested.msrpm = page_address(nested_msrpm_pages);
svm_vcpu_init_msrpm(svm->nested.msrpm);
svm->vmcb = page_address(page);
clear_page(svm->vmcb);
svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
svm->asid_generation = 0;
init_vmcb(svm);
svm_init_osvw(&svm->vcpu);
return &svm->vcpu;
free_page3:
__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
free_page2:
__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
free_page1:
__free_page(page);
uninit:
kvm_vcpu_uninit(&svm->vcpu);
free_svm:
kmem_cache_free(kvm_vcpu_cache, svm);
out:
return ERR_PTR(err);
}
static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
__free_page(virt_to_page(svm->nested.hsave));
__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
kvm_vcpu_uninit(vcpu);
kmem_cache_free(kvm_vcpu_cache, svm);
}
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
int i;
if (unlikely(cpu != vcpu->cpu)) {
svm->asid_generation = 0;
mark_all_dirty(svm->vmcb);
}
#ifdef CONFIG_X86_64
rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
#endif
savesegment(fs, svm->host.fs);
savesegment(gs, svm->host.gs);
svm->host.ldt = kvm_read_ldt();
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
__this_cpu_write(current_tsc_ratio, tsc_ratio);
wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
}
}
}
static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
int i;
++vcpu->stat.host_state_reload;
kvm_load_ldt(svm->host.ldt);
#ifdef CONFIG_X86_64
loadsegment(fs, svm->host.fs);
wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
load_gs_index(svm->host.gs);
#else
#ifdef CONFIG_X86_32_LAZY_GS
loadsegment(gs, svm->host.gs);
#endif
#endif
for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}
static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
return to_svm(vcpu)->vmcb->save.rflags;
}
static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
/*
	 * Any change of EFLAGS.VM is accompanied by a reload of SS
* (caused by either a task switch or an inter-privilege IRET),
* so we do not need to update the CPL here.
*/
to_svm(vcpu)->vmcb->save.rflags = rflags;
}
static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
switch (reg) {
case VCPU_EXREG_PDPTR:
BUG_ON(!npt_enabled);
load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
break;
default:
BUG();
}
}
static void svm_set_vintr(struct vcpu_svm *svm)
{
set_intercept(svm, INTERCEPT_VINTR);
}
static void svm_clear_vintr(struct vcpu_svm *svm)
{
clr_intercept(svm, INTERCEPT_VINTR);
}
static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
switch (seg) {
case VCPU_SREG_CS: return &save->cs;
case VCPU_SREG_DS: return &save->ds;
case VCPU_SREG_ES: return &save->es;
case VCPU_SREG_FS: return &save->fs;
case VCPU_SREG_GS: return &save->gs;
case VCPU_SREG_SS: return &save->ss;
case VCPU_SREG_TR: return &save->tr;
case VCPU_SREG_LDTR: return &save->ldtr;
}
BUG();
return NULL;
}
static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
struct vmcb_seg *s = svm_seg(vcpu, seg);
return s->base;
}
static void svm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vmcb_seg *s = svm_seg(vcpu, seg);
var->base = s->base;
var->limit = s->limit;
var->selector = s->selector;
var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
/*
* AMD CPUs circa 2014 track the G bit for all segments except CS.
* However, the SVM spec states that the G bit is not observed by the
* CPU, and some VMware virtual CPUs drop the G bit for all segments.
* So let's synthesize a legal G bit for all segments, this helps
* running KVM nested. It also helps cross-vendor migration, because
* Intel's vmentry has a check on the 'G' bit.
*/
var->g = s->limit > 0xfffff;
/*
* AMD's VMCB does not have an explicit unusable field, so emulate it
* for cross vendor migration purposes by "not present"
*/
var->unusable = !var->present || (var->type == 0);
switch (seg) {
case VCPU_SREG_TR:
/*
* Work around a bug where the busy flag in the tr selector
* isn't exposed
*/
var->type |= 0x2;
break;
case VCPU_SREG_DS:
case VCPU_SREG_ES:
case VCPU_SREG_FS:
case VCPU_SREG_GS:
/*
	 * The accessed bit must always be set in the segment
	 * descriptor cache; although it can be cleared in the
	 * descriptor itself, the cached bit always remains 1. Since
* Intel has a check on this, set it here to support
* cross-vendor migration.
*/
if (!var->unusable)
var->type |= 0x1;
break;
case VCPU_SREG_SS:
/*
* On AMD CPUs sometimes the DB bit in the segment
* descriptor is left as 1, although the whole segment has
* been made unusable. Clear it here to pass an Intel VMX
* entry check when cross vendor migrating.
*/
if (var->unusable)
var->db = 0;
var->dpl = to_svm(vcpu)->vmcb->save.cpl;
break;
}
}
static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
return save->cpl;
}
static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
dt->size = svm->vmcb->save.idtr.limit;
dt->address = svm->vmcb->save.idtr.base;
}
static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.idtr.limit = dt->size;
	svm->vmcb->save.idtr.base = dt->address;
mark_dirty(svm->vmcb, VMCB_DT);
}
static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
dt->size = svm->vmcb->save.gdtr.limit;
dt->address = svm->vmcb->save.gdtr.base;
}
static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.gdtr.limit = dt->size;
	svm->vmcb->save.gdtr.base = dt->address;
mark_dirty(svm->vmcb, VMCB_DT);
}
static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
{
}
static void svm_decache_cr3(struct kvm_vcpu *vcpu)
{
}
static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}
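/*
 * CR0 reads and writes only need to be fully intercepted while the
 * guest-visible CR0 differs from the value in the VMCB (e.g. when TS
 * is forced for lazy FPU switching); once they match again, the
 * always-on selective CR0 write intercept is sufficient.
 */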
static void update_cr0_intercept(struct vcpu_svm *svm)
{
ulong gcr0 = svm->vcpu.arch.cr0;
u64 *hcr0 = &svm->vmcb->save.cr0;
if (!svm->vcpu.fpu_active)
*hcr0 |= SVM_CR0_SELECTIVE_MASK;
else
*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
| (gcr0 & SVM_CR0_SELECTIVE_MASK);
mark_dirty(svm->vmcb, VMCB_CR);
if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
clr_cr_intercept(svm, INTERCEPT_CR0_READ);
clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
} else {
set_cr_intercept(svm, INTERCEPT_CR0_READ);
set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
}
}
static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
struct vcpu_svm *svm = to_svm(vcpu);
#ifdef CONFIG_X86_64
if (vcpu->arch.efer & EFER_LME) {
if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
vcpu->arch.efer |= EFER_LMA;
svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
}
if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
vcpu->arch.efer &= ~EFER_LMA;
svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
}
}
#endif
vcpu->arch.cr0 = cr0;
if (!npt_enabled)
cr0 |= X86_CR0_PG | X86_CR0_WP;
if (!vcpu->fpu_active)
cr0 |= X86_CR0_TS;
/*
* re-enable caching here because the QEMU bios
* does not do it - this results in some delay at
* reboot
*/
if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
svm->vmcb->save.cr0 = cr0;
mark_dirty(svm->vmcb, VMCB_CR);
update_cr0_intercept(svm);
}
static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
if (cr4 & X86_CR4_VMXE)
return 1;
if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
svm_flush_tlb(vcpu);
vcpu->arch.cr4 = cr4;
if (!npt_enabled)
cr4 |= X86_CR4_PAE;
cr4 |= host_cr4_mce;
to_svm(vcpu)->vmcb->save.cr4 = cr4;
mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
return 0;
}
static void svm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_seg *s = svm_seg(vcpu, seg);
s->base = var->base;
s->limit = var->limit;
s->selector = var->selector;
if (var->unusable)
s->attrib = 0;
else {
s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
}
/*
* This is always accurate, except if SYSRET returned to a segment
* with SS.DPL != 3. Intel does not have this quirk, and always
* forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
* would entail passing the CPL to userspace and back.
*/
if (seg == VCPU_SREG_SS)
svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
mark_dirty(svm->vmcb, VMCB_SEG);
}
static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
clr_exception_intercept(svm, DB_VECTOR);
clr_exception_intercept(svm, BP_VECTOR);
if (svm->nmi_singlestep)
set_exception_intercept(svm, DB_VECTOR);
if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
if (vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
set_exception_intercept(svm, DB_VECTOR);
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
set_exception_intercept(svm, BP_VECTOR);
} else
vcpu->guest_debug = 0;
}
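/*
 * ASIDs are handed out from a simple per-CPU pool. When the pool is
 * exhausted, the generation counter is bumped and the TLB is flushed
 * for all ASIDs, which retires every ASID handed out in the previous
 * generation on this CPU.
 */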
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
if (sd->next_asid > sd->max_asid) {
++sd->asid_generation;
sd->next_asid = 1;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
}
svm->asid_generation = sd->asid_generation;
svm->vmcb->control.asid = sd->next_asid++;
mark_dirty(svm->vmcb, VMCB_ASID);
}
static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
{
return to_svm(vcpu)->vmcb->save.dr6;
}
static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.dr6 = value;
mark_dirty(svm->vmcb, VMCB_DR);
}
static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
get_debugreg(vcpu->arch.db[3], 3);
vcpu->arch.dr6 = svm_get_dr6(vcpu);
vcpu->arch.dr7 = svm->vmcb->save.dr7;
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
set_dr_intercepts(svm);
}
static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.dr7 = value;
mark_dirty(svm->vmcb, VMCB_DR);
}
static int pf_interception(struct vcpu_svm *svm)
{
u64 fault_address = svm->vmcb->control.exit_info_2;
u32 error_code;
int r = 1;
switch (svm->apf_reason) {
default:
error_code = svm->vmcb->control.exit_info_1;
trace_kvm_page_fault(fault_address, error_code);
if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
svm->vmcb->control.insn_bytes,
svm->vmcb->control.insn_len);
break;
case KVM_PV_REASON_PAGE_NOT_PRESENT:
svm->apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wait(fault_address);
local_irq_enable();
break;
case KVM_PV_REASON_PAGE_READY:
svm->apf_reason = 0;
local_irq_disable();
kvm_async_pf_task_wake(fault_address);
local_irq_enable();
break;
}
return r;
}
static int db_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
if (!(svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
!svm->nmi_singlestep) {
kvm_queue_exception(&svm->vcpu, DB_VECTOR);
return 1;
}
if (svm->nmi_singlestep) {
svm->nmi_singlestep = false;
if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
svm->vmcb->save.rflags &=
~(X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_bp_intercept(&svm->vcpu);
}
if (svm->vcpu.guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
kvm_run->debug.arch.exception = DB_VECTOR;
return 0;
}
return 1;
}
static int bp_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
kvm_run->debug.arch.exception = BP_VECTOR;
return 0;
}
static int ud_interception(struct vcpu_svm *svm)
{
int er;
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
if (er != EMULATE_DONE)
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
static int ac_interception(struct vcpu_svm *svm)
{
kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
return 1;
}
static void svm_fpu_activate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
clr_exception_intercept(svm, NM_VECTOR);
svm->vcpu.fpu_active = 1;
update_cr0_intercept(svm);
}
static int nm_interception(struct vcpu_svm *svm)
{
svm_fpu_activate(&svm->vcpu);
return 1;
}
static bool is_erratum_383(void)
{
int err, i;
u64 value;
if (!erratum_383_found)
return false;
value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
if (err)
return false;
/* Bit 62 may or may not be set for this mce */
value &= ~(1ULL << 62);
if (value != 0xb600000000010015ULL)
return false;
/* Clear MCi_STATUS registers */
for (i = 0; i < 6; ++i)
native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
if (!err) {
u32 low, high;
value &= ~(1ULL << 2);
low = lower_32_bits(value);
high = upper_32_bits(value);
native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
}
/* Flush tlb to evict multi-match entries */
__flush_tlb_all();
return true;
}
static void svm_handle_mce(struct vcpu_svm *svm)
{
if (is_erratum_383()) {
/*
* Erratum 383 triggered. Guest state is corrupt so kill the
* guest.
*/
pr_err("KVM: Guest triggered AMD Erratum 383\n");
kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
return;
}
/*
* On an #MC intercept the MCE handler is not called automatically in
* the host. So do it by hand here.
*/
asm volatile (
"int $0x12\n");
/* not sure if we ever come back to this point */
return;
}
static int mc_interception(struct vcpu_svm *svm)
{
return 1;
}
static int shutdown_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
/*
* VMCB is undefined after a SHUTDOWN intercept
* so reinitialize it.
*/
clear_page(svm->vmcb);
init_vmcb(svm);
kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
return 0;
}
static int io_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
int size, in, string;
unsigned port;
++svm->vcpu.stat.io_exits;
string = (io_info & SVM_IOIO_STR_MASK) != 0;
in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
if (string || in)
return emulate_instruction(vcpu, 0) == EMULATE_DONE;
port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
svm->next_rip = svm->vmcb->control.exit_info_2;
skip_emulated_instruction(&svm->vcpu);
return kvm_fast_pio_out(vcpu, size, port);
}
static int nmi_interception(struct vcpu_svm *svm)
{
return 1;
}
static int intr_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.irq_exits;
return 1;
}
static int nop_on_interception(struct vcpu_svm *svm)
{
return 1;
}
static int halt_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
return kvm_emulate_halt(&svm->vcpu);
}
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
kvm_emulate_hypercall(&svm->vcpu);
return 1;
}
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return svm->nested.nested_cr3;
}
static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr3 = svm->nested.nested_cr3;
u64 pdpte;
int ret;
ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(cr3), &pdpte,
offset_in_page(cr3) + index * 8, 8);
if (ret)
return 0;
return pdpte;
}
static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.nested_cr3 = root;
mark_dirty(svm->vmcb, VMCB_NPT);
svm_flush_tlb(vcpu);
}
static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
struct x86_exception *fault)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
/*
* TODO: track the cause of the nested page fault, and
* correctly fill in the high bits of exit_info_1.
*/
svm->vmcb->control.exit_code = SVM_EXIT_NPF;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = (1ULL << 32);
svm->vmcb->control.exit_info_2 = fault->address;
}
svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
svm->vmcb->control.exit_info_1 |= fault->error_code;
/*
* The present bit is always zero for page structure faults on real
* hardware.
*/
if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
svm->vmcb->control.exit_info_1 &= ~1;
nested_svm_vmexit(svm);
}
static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
{
WARN_ON(mmu_is_nested(vcpu));
kvm_init_shadow_mmu(vcpu);
vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
vcpu->arch.mmu.shadow_root_level = get_npt_level();
reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
}
static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
{
vcpu->arch.walk_mmu = &vcpu->arch.mmu;
}
static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
if (!(svm->vcpu.arch.efer & EFER_SVME)
|| !is_paging(&svm->vcpu)) {
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
if (svm->vmcb->save.cpl) {
kvm_inject_gp(&svm->vcpu, 0);
return 1;
}
return 0;
}
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code)
{
int vmexit;
if (!is_guest_mode(&svm->vcpu))
return 0;
svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = error_code;
svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
vmexit = nested_svm_intercept(svm);
if (vmexit == NESTED_EXIT_DONE)
svm->nested.exit_required = true;
return vmexit;
}
/* This function returns true if it is safe to enable the irq window */
static inline bool nested_svm_intr(struct vcpu_svm *svm)
{
if (!is_guest_mode(&svm->vcpu))
return true;
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
return true;
if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
return false;
/*
* if vmexit was already requested (by intercepted exception
* for instance) do not overwrite it with "external interrupt"
* vmexit.
*/
if (svm->nested.exit_required)
return false;
svm->vmcb->control.exit_code = SVM_EXIT_INTR;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
if (svm->nested.intercept & 1ULL) {
/*
* The #vmexit can't be emulated here directly because this
* code path runs with irqs and preemption disabled. A
* #vmexit emulation might sleep. Only signal request for
* the #vmexit here.
*/
svm->nested.exit_required = true;
trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
return false;
}
return true;
}
/* This function returns true if it is safe to enable the nmi window */
static inline bool nested_svm_nmi(struct vcpu_svm *svm)
{
if (!is_guest_mode(&svm->vcpu))
return true;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
return true;
svm->vmcb->control.exit_code = SVM_EXIT_NMI;
svm->nested.exit_required = true;
return false;
}
static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
{
struct page *page;
might_sleep();
page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
if (is_error_page(page))
goto error;
*_page = page;
return kmap(page);
error:
kvm_inject_gp(&svm->vcpu, 0);
return NULL;
}
static void nested_svm_unmap(struct page *page)
{
kunmap(page);
kvm_release_page_dirty(page);
}
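/*
 * Consult L1's I/O permission bitmap: one bit per port, and an access
 * of 'size' bytes tests 'size' consecutive bits, so an access that
 * straddles a byte boundary requires reading two bytes of the bitmap.
 * Failure to read the guest bitmap is treated as an intercept.
 */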
static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
{
unsigned port, size, iopm_len;
u16 val, mask;
u8 start_bit;
u64 gpa;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
return NESTED_EXIT_HOST;
port = svm->vmcb->control.exit_info_1 >> 16;
size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
SVM_IOIO_SIZE_SHIFT;
gpa = svm->nested.vmcb_iopm + (port / 8);
start_bit = port % 8;
iopm_len = (start_bit + size > 8) ? 2 : 1;
mask = (0xf >> (4 - size)) << start_bit;
val = 0;
if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
return NESTED_EXIT_DONE;
return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
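/*
 * Decide whether an intercepted MSR access must be reflected to L1 by
 * looking the MSR up in L1's MSR permission map, which uses the same
 * two-bits-per-MSR layout as our own.
 */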
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
{
u32 offset, msr, value;
int write, mask;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return NESTED_EXIT_HOST;
msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
offset = svm_msrpm_offset(msr);
write = svm->vmcb->control.exit_info_1 & 1;
mask = 1 << ((2 * (msr & 0xf)) + write);
if (offset == MSR_INVALID)
return NESTED_EXIT_DONE;
	/* Offset is in 32 bit units but we need it in 8 bit units */
offset *= 4;
if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
return NESTED_EXIT_DONE;
return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
}
static int nested_svm_exit_special(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
switch (exit_code) {
case SVM_EXIT_INTR:
case SVM_EXIT_NMI:
case SVM_EXIT_EXCP_BASE + MC_VECTOR:
return NESTED_EXIT_HOST;
case SVM_EXIT_NPF:
/* For now we are always handling NPFs when using them */
if (npt_enabled)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + PF_VECTOR:
/* When we're shadowing, trap PFs, but not async PF */
if (!npt_enabled && svm->apf_reason == 0)
return NESTED_EXIT_HOST;
break;
case SVM_EXIT_EXCP_BASE + NM_VECTOR:
nm_interception(svm);
break;
default:
break;
}
return NESTED_EXIT_CONTINUE;
}
/*
 * Returns NESTED_EXIT_DONE if the #vmexit must be handled by the
 * nested hypervisor (L1), NESTED_EXIT_HOST if we handle it here.
 */
static int nested_svm_intercept(struct vcpu_svm *svm)
{
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
switch (exit_code) {
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
break;
case SVM_EXIT_IOIO:
vmexit = nested_svm_intercept_ioio(svm);
break;
case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
if (svm->nested.intercept_cr & bit)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
if (svm->nested.intercept_dr & bit)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
if (svm->nested.intercept_exceptions & excp_bits)
vmexit = NESTED_EXIT_DONE;
		/* async page faults always cause a vmexit */
else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
svm->apf_reason != 0)
vmexit = NESTED_EXIT_DONE;
break;
}
case SVM_EXIT_ERR: {
vmexit = NESTED_EXIT_DONE;
break;
}
default: {
u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
if (svm->nested.intercept & exit_bits)
vmexit = NESTED_EXIT_DONE;
}
}
return vmexit;
}
static int nested_svm_exit_handled(struct vcpu_svm *svm)
{
int vmexit;
vmexit = nested_svm_intercept(svm);
if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
return vmexit;
}
static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
{
struct vmcb_control_area *dst = &dst_vmcb->control;
struct vmcb_control_area *from = &from_vmcb->control;
dst->intercept_cr = from->intercept_cr;
dst->intercept_dr = from->intercept_dr;
dst->intercept_exceptions = from->intercept_exceptions;
dst->intercept = from->intercept;
dst->iopm_base_pa = from->iopm_base_pa;
dst->msrpm_base_pa = from->msrpm_base_pa;
dst->tsc_offset = from->tsc_offset;
dst->asid = from->asid;
dst->tlb_ctl = from->tlb_ctl;
dst->int_ctl = from->int_ctl;
dst->int_vector = from->int_vector;
dst->int_state = from->int_state;
dst->exit_code = from->exit_code;
dst->exit_code_hi = from->exit_code_hi;
dst->exit_info_1 = from->exit_info_1;
dst->exit_info_2 = from->exit_info_2;
dst->exit_int_info = from->exit_int_info;
dst->exit_int_info_err = from->exit_int_info_err;
dst->nested_ctl = from->nested_ctl;
dst->event_inj = from->event_inj;
dst->event_inj_err = from->event_inj_err;
dst->nested_cr3 = from->nested_cr3;
dst->lbr_ctl = from->lbr_ctl;
}
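/*
 * Emulated #VMEXIT: copy the L2 guest state back into the nested VMCB
 * for L1 to inspect, then restore L1's control and save state from the
 * host save area and leave guest mode.
 */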
static int nested_svm_vmexit(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct page *page;
trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
vmcb->control.exit_info_1,
vmcb->control.exit_info_2,
vmcb->control.exit_int_info,
vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
if (!nested_vmcb)
return 1;
/* Exit Guest-Mode */
leave_guest_mode(&svm->vcpu);
svm->nested.vmcb = 0;
/* Give the current vmcb to the guest */
disable_gif(svm);
nested_vmcb->save.es = vmcb->save.es;
nested_vmcb->save.cs = vmcb->save.cs;
nested_vmcb->save.ss = vmcb->save.ss;
nested_vmcb->save.ds = vmcb->save.ds;
nested_vmcb->save.gdtr = vmcb->save.gdtr;
nested_vmcb->save.idtr = vmcb->save.idtr;
nested_vmcb->save.efer = svm->vcpu.arch.efer;
nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
nested_vmcb->save.cr2 = vmcb->save.cr2;
nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
nested_vmcb->save.rip = vmcb->save.rip;
nested_vmcb->save.rsp = vmcb->save.rsp;
nested_vmcb->save.rax = vmcb->save.rax;
nested_vmcb->save.dr7 = vmcb->save.dr7;
nested_vmcb->save.dr6 = vmcb->save.dr6;
nested_vmcb->save.cpl = vmcb->save.cpl;
nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
nested_vmcb->control.int_vector = vmcb->control.int_vector;
nested_vmcb->control.int_state = vmcb->control.int_state;
nested_vmcb->control.exit_code = vmcb->control.exit_code;
nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
if (svm->nrips_enabled)
nested_vmcb->control.next_rip = vmcb->control.next_rip;
/*
* If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
* to make sure that we do not lose injected events. So check event_inj
* here and copy it to exit_int_info if it is valid.
* Exit_int_info and event_inj can't be both valid because the case
* below only happens on a VMRUN instruction intercept which has
* no valid exit_int_info set.
*/
if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
struct vmcb_control_area *nc = &nested_vmcb->control;
nc->exit_int_info = vmcb->control.event_inj;
nc->exit_int_info_err = vmcb->control.event_inj_err;
}
nested_vmcb->control.tlb_ctl = 0;
nested_vmcb->control.event_inj = 0;
nested_vmcb->control.event_inj_err = 0;
/* We always set V_INTR_MASKING and remember the old value in hflags */
if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
/* Restore the original control entries */
copy_vmcb_control_area(vmcb, hsave);
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
svm->nested.nested_cr3 = 0;
/* Restore selected save entries */
svm->vmcb->save.es = hsave->save.es;
svm->vmcb->save.cs = hsave->save.cs;
svm->vmcb->save.ss = hsave->save.ss;
svm->vmcb->save.ds = hsave->save.ds;
svm->vmcb->save.gdtr = hsave->save.gdtr;
svm->vmcb->save.idtr = hsave->save.idtr;
kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
svm_set_efer(&svm->vcpu, hsave->save.efer);
svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
svm_set_cr4(&svm->vcpu, hsave->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = hsave->save.cr3;
svm->vcpu.arch.cr3 = hsave->save.cr3;
} else {
(void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
}
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
svm->vmcb->save.dr7 = 0;
svm->vmcb->save.cpl = 0;
svm->vmcb->control.exit_int_info = 0;
mark_all_dirty(svm->vmcb);
nested_svm_unmap(page);
nested_svm_uninit_mmu_context(&svm->vcpu);
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
return 0;
}
static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
{
/*
* This function merges the msr permission bitmaps of kvm and the
* nested vmcb. It is optimized in that it only merges the parts where
* the kvm msr permission bitmap may contain zero bits
*/
int i;
if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
return true;
for (i = 0; i < MSRPM_OFFSETS; i++) {
u32 value, p;
u64 offset;
if (msrpm_offsets[i] == 0xffffffff)
break;
p = msrpm_offsets[i];
offset = svm->nested.vmcb_msrpm + (p * 4);
if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
return false;
svm->nested.msrpm[p] = svm->msrpm[p] | value;
}
svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
return true;
}
static bool nested_vmcb_checks(struct vmcb *vmcb)
{
if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
return false;
if (vmcb->control.asid == 0)
return false;
if (vmcb->control.nested_ctl && !npt_enabled)
return false;
return true;
}
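/*
 * Emulated VMRUN: stash the L1 state in the host save area, load the
 * L2 state from the nested VMCB, cache and merge the intercept masks,
 * and enter guest mode. The MSR bitmap merge is done separately in
 * nested_svm_vmrun_msrpm().
 */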
static bool nested_svm_vmrun(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct page *page;
u64 vmcb_gpa;
vmcb_gpa = svm->vmcb->save.rax;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return false;
if (!nested_vmcb_checks(nested_vmcb)) {
nested_vmcb->control.exit_code = SVM_EXIT_ERR;
nested_vmcb->control.exit_code_hi = 0;
nested_vmcb->control.exit_info_1 = 0;
nested_vmcb->control.exit_info_2 = 0;
nested_svm_unmap(page);
return false;
}
trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
nested_vmcb->save.rip,
nested_vmcb->control.int_ctl,
nested_vmcb->control.event_inj,
nested_vmcb->control.nested_ctl);
trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
nested_vmcb->control.intercept_cr >> 16,
nested_vmcb->control.intercept_exceptions,
nested_vmcb->control.intercept);
/* Clear internal status */
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
/*
* Save the old vmcb, so we don't need to pick what we save, but can
* restore everything when a VMEXIT occurs
*/
hsave->save.es = vmcb->save.es;
hsave->save.cs = vmcb->save.cs;
hsave->save.ss = vmcb->save.ss;
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
hsave->save.efer = svm->vcpu.arch.efer;
hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
hsave->save.cr4 = svm->vcpu.arch.cr4;
hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
hsave->save.rip = kvm_rip_read(&svm->vcpu);
hsave->save.rsp = vmcb->save.rsp;
hsave->save.rax = vmcb->save.rax;
if (npt_enabled)
hsave->save.cr3 = vmcb->save.cr3;
else
hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
copy_vmcb_control_area(hsave, vmcb);
if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
svm->vcpu.arch.hflags |= HF_HIF_MASK;
else
svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
if (nested_vmcb->control.nested_ctl) {
kvm_mmu_unload(&svm->vcpu);
svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
nested_svm_init_mmu_context(&svm->vcpu);
}
/* Load the nested guest state */
svm->vmcb->save.es = nested_vmcb->save.es;
svm->vmcb->save.cs = nested_vmcb->save.cs;
svm->vmcb->save.ss = nested_vmcb->save.ss;
svm->vmcb->save.ds = nested_vmcb->save.ds;
svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
svm->vmcb->save.idtr = nested_vmcb->save.idtr;
kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
if (npt_enabled) {
svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
} else
(void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
/* Guest paging mode is active - reset mmu */
kvm_mmu_reset_context(&svm->vcpu);
svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
/* In case we don't even reach vcpu_run, the fields are not updated */
svm->vmcb->save.rax = nested_vmcb->save.rax;
svm->vmcb->save.rsp = nested_vmcb->save.rsp;
svm->vmcb->save.rip = nested_vmcb->save.rip;
svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
svm->vmcb->save.cpl = nested_vmcb->save.cpl;
svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
/* cache intercepts */
svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
svm->nested.intercept = nested_vmcb->control.intercept;
svm_flush_tlb(&svm->vcpu);
svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
svm->vcpu.arch.hflags |= HF_VINTR_MASK;
else
svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
/* We only want the cr8 intercept bits of the guest */
clr_cr_intercept(svm, INTERCEPT_CR8_READ);
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
/* We don't want to see VMMCALLs from a nested guest */
clr_intercept(svm, INTERCEPT_VMMCALL);
svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl;
svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
svm->vmcb->control.int_state = nested_vmcb->control.int_state;
svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
nested_svm_unmap(page);
/* Enter Guest-Mode */
enter_guest_mode(&svm->vcpu);
/*
* Merge guest and host intercepts - must be called with vcpu in
	 * guest-mode to take effect here
*/
recalc_intercepts(svm);
svm->nested.vmcb = vmcb_gpa;
enable_gif(svm);
mark_all_dirty(svm->vmcb);
return true;
}
static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
{
to_vmcb->save.fs = from_vmcb->save.fs;
to_vmcb->save.gs = from_vmcb->save.gs;
to_vmcb->save.tr = from_vmcb->save.tr;
to_vmcb->save.ldtr = from_vmcb->save.ldtr;
to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
to_vmcb->save.star = from_vmcb->save.star;
to_vmcb->save.lstar = from_vmcb->save.lstar;
to_vmcb->save.cstar = from_vmcb->save.cstar;
to_vmcb->save.sfmask = from_vmcb->save.sfmask;
to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
}
static int vmload_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
nested_svm_unmap(page);
return 1;
}
static int vmsave_interception(struct vcpu_svm *svm)
{
struct vmcb *nested_vmcb;
struct page *page;
if (nested_svm_check_permissions(svm))
return 1;
nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
if (!nested_vmcb)
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
nested_svm_unmap(page);
return 1;
}
static int vmrun_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
/* Save rip after vmrun instruction */
kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
if (!nested_svm_vmrun(svm))
return 1;
if (!nested_svm_vmrun_msrpm(svm))
goto failed;
return 1;
failed:
svm->vmcb->control.exit_code = SVM_EXIT_ERR;
svm->vmcb->control.exit_code_hi = 0;
svm->vmcb->control.exit_info_1 = 0;
svm->vmcb->control.exit_info_2 = 0;
nested_svm_vmexit(svm);
return 1;
}
static int stgi_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
enable_gif(svm);
return 1;
}
static int clgi_interception(struct vcpu_svm *svm)
{
if (nested_svm_check_permissions(svm))
return 1;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
disable_gif(svm);
	/* After a CLGI no interrupts should be delivered */
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
return 1;
}
static int invlpga_interception(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu = &svm->vcpu;
trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
/* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int skinit_interception(struct vcpu_svm *svm)
{
trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
static int wbinvd_interception(struct vcpu_svm *svm)
{
kvm_emulate_wbinvd(&svm->vcpu);
return 1;
}
static int xsetbv_interception(struct vcpu_svm *svm)
{
u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int task_switch_interception(struct vcpu_svm *svm)
{
u16 tss_selector;
int reason;
int int_type = svm->vmcb->control.exit_int_info &
SVM_EXITINTINFO_TYPE_MASK;
int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
uint32_t type =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
uint32_t idt_v =
svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
bool has_error_code = false;
u32 error_code = 0;
tss_selector = (u16)svm->vmcb->control.exit_info_1;
if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
reason = TASK_SWITCH_IRET;
else if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
reason = TASK_SWITCH_JMP;
else if (idt_v)
reason = TASK_SWITCH_GATE;
else
reason = TASK_SWITCH_CALL;
if (reason == TASK_SWITCH_GATE) {
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
svm->vcpu.arch.nmi_injected = false;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
if (svm->vmcb->control.exit_info_2 &
(1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
has_error_code = true;
error_code =
(u32)svm->vmcb->control.exit_info_2;
}
kvm_clear_exception_queue(&svm->vcpu);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_clear_interrupt_queue(&svm->vcpu);
break;
default:
break;
}
}
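/*
* RIP is only advanced for task switches initiated by an instruction
* (IRET, JMP, CALL, or a software INT3/INTO); switches raised through
* an interrupt or exception gate leave RIP alone so the interrupted
* instruction stays restartable.
*/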
if (reason != TASK_SWITCH_GATE ||
int_type == SVM_EXITINTINFO_TYPE_SOFT ||
(int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
(int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
skip_emulated_instruction(&svm->vcpu);
if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
int_vec = -1;
if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
has_error_code, error_code) == EMULATE_FAIL) {
svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
svm->vcpu.run->internal.ndata = 0;
return 0;
}
return 1;
}
static int cpuid_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
kvm_emulate_cpuid(&svm->vcpu);
return 1;
}
static int iret_interception(struct vcpu_svm *svm)
{
++svm->vcpu.stat.nmi_window_exits;
clr_intercept(svm, INTERCEPT_IRET);
svm->vcpu.arch.hflags |= HF_IRET_MASK;
svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
return 1;
}
static int invlpg_interception(struct vcpu_svm *svm)
{
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int emulate_on_interception(struct vcpu_svm *svm)
{
return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
}
static int rdpmc_interception(struct vcpu_svm *svm)
{
int err;
if (!static_cpu_has(X86_FEATURE_NRIPS))
return emulate_on_interception(svm);
err = kvm_rdpmc(&svm->vcpu);
kvm_complete_insn_gp(&svm->vcpu, err);
return 1;
}
static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
unsigned long val)
{
unsigned long cr0 = svm->vcpu.arch.cr0;
bool ret = false;
u64 intercept;
intercept = svm->nested.intercept;
if (!is_guest_mode(&svm->vcpu) ||
(!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
return false;
cr0 &= ~SVM_CR0_SELECTIVE_MASK;
val &= ~SVM_CR0_SELECTIVE_MASK;
if (cr0 ^ val) {
svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
}
return ret;
}
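/*
* The selective CR0 intercept only fires when bits outside of
* SVM_CR0_SELECTIVE_MASK (CR0.TS and CR0.MP) change, which is why
* both the old and new values are masked before the XOR test above.
*/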
#define CR_VALID (1ULL << 63)
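/*
* With decode assists, bit 63 of exit_info_1 (CR_VALID) signals that
* the MOV-CR GPR number sits in the low bits. The exit codes are laid
* out as READ_CR0..CR15 followed by WRITE_CR0..CR15, so after
* subtracting SVM_EXIT_READ_CR0 a value >= 16 means "mov to CRn" and
* cr - 16 is the CR number, e.g.:
*
* SVM_EXIT_WRITE_CR4 - SVM_EXIT_READ_CR0 == 16 + 4
*/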
static int cr_interception(struct vcpu_svm *svm)
{
int reg, cr;
unsigned long val;
int err;
if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_on_interception(svm);
if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
else
cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
err = 0;
if (cr >= 16) { /* mov to cr */
cr -= 16;
val = kvm_register_read(&svm->vcpu, reg);
switch (cr) {
case 0:
if (!check_selective_cr0_intercepted(svm, val))
err = kvm_set_cr0(&svm->vcpu, val);
else
return 1;
break;
case 3:
err = kvm_set_cr3(&svm->vcpu, val);
break;
case 4:
err = kvm_set_cr4(&svm->vcpu, val);
break;
case 8:
err = kvm_set_cr8(&svm->vcpu, val);
break;
default:
WARN(1, "unhandled write to CR%d", cr);
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
} else { /* mov from cr */
switch (cr) {
case 0:
val = kvm_read_cr0(&svm->vcpu);
break;
case 2:
val = svm->vcpu.arch.cr2;
break;
case 3:
val = kvm_read_cr3(&svm->vcpu);
break;
case 4:
val = kvm_read_cr4(&svm->vcpu);
break;
case 8:
val = kvm_get_cr8(&svm->vcpu);
break;
default:
WARN(1, "unhandled read from CR%d", cr);
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
}
kvm_register_write(&svm->vcpu, reg, val);
}
kvm_complete_insn_gp(&svm->vcpu, err);
return 1;
}
static int dr_interception(struct vcpu_svm *svm)
{
int reg, dr;
unsigned long val;
if (svm->vcpu.guest_debug == 0) {
/*
* No more DR vmexits; force a reload of the debug registers
* and reenter on this instruction. The next vmexit will
* retrieve the full state of the debug registers.
*/
clr_dr_intercepts(svm);
svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
return 1;
}
if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
return emulate_on_interception(svm);
reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;
if (dr >= 16) { /* mov to DRn */
if (!kvm_require_dr(&svm->vcpu, dr - 16))
return 1;
val = kvm_register_read(&svm->vcpu, reg);
kvm_set_dr(&svm->vcpu, dr - 16, val);
} else {
if (!kvm_require_dr(&svm->vcpu, dr))
return 1;
kvm_get_dr(&svm->vcpu, dr, &val);
kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
return 1;
}
static int cr8_write_interception(struct vcpu_svm *svm)
{
struct kvm_run *kvm_run = svm->vcpu.run;
int r;
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
if (lapic_in_kernel(&svm->vcpu))
return r;
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
return 0;
}
static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
return vmcb->control.tsc_offset + host_tsc;
}
static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
struct vcpu_svm *svm = to_svm(vcpu);
switch (msr_info->index) {
case MSR_IA32_TSC: {
msr_info->data = svm->vmcb->control.tsc_offset +
kvm_scale_tsc(vcpu, rdtsc());
break;
}
case MSR_STAR:
msr_info->data = svm->vmcb->save.star;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
msr_info->data = svm->vmcb->save.lstar;
break;
case MSR_CSTAR:
msr_info->data = svm->vmcb->save.cstar;
break;
case MSR_KERNEL_GS_BASE:
msr_info->data = svm->vmcb->save.kernel_gs_base;
break;
case MSR_SYSCALL_MASK:
msr_info->data = svm->vmcb->save.sfmask;
break;
#endif
case MSR_IA32_SYSENTER_CS:
msr_info->data = svm->vmcb->save.sysenter_cs;
break;
case MSR_IA32_SYSENTER_EIP:
msr_info->data = svm->sysenter_eip;
break;
case MSR_IA32_SYSENTER_ESP:
msr_info->data = svm->sysenter_esp;
break;
/*
* Nobody will change the following 5 values in the VMCB so we can
* safely return them on rdmsr. They will always be 0 until LBRV is
* implemented.
*/
case MSR_IA32_DEBUGCTLMSR:
msr_info->data = svm->vmcb->save.dbgctl;
break;
case MSR_IA32_LASTBRANCHFROMIP:
msr_info->data = svm->vmcb->save.br_from;
break;
case MSR_IA32_LASTBRANCHTOIP:
msr_info->data = svm->vmcb->save.br_to;
break;
case MSR_IA32_LASTINTFROMIP:
msr_info->data = svm->vmcb->save.last_excp_from;
break;
case MSR_IA32_LASTINTTOIP:
msr_info->data = svm->vmcb->save.last_excp_to;
break;
case MSR_VM_HSAVE_PA:
msr_info->data = svm->nested.hsave_msr;
break;
case MSR_VM_CR:
msr_info->data = svm->nested.vm_cr_msr;
break;
case MSR_IA32_UCODE_REV:
msr_info->data = 0x01000065;
break;
default:
return kvm_get_msr_common(vcpu, msr_info);
}
return 0;
}
static int rdmsr_interception(struct vcpu_svm *svm)
{
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
struct msr_data msr_info;
msr_info.index = ecx;
msr_info.host_initiated = false;
if (svm_get_msr(&svm->vcpu, &msr_info)) {
trace_kvm_msr_read_ex(ecx);
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_read(ecx, msr_info.data);
kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
msr_info.data & 0xffffffff);
kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
msr_info.data >> 32);
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
struct vcpu_svm *svm = to_svm(vcpu);
int svm_dis, chg_mask;
if (data & ~SVM_VM_CR_VALID_MASK)
return 1;
chg_mask = SVM_VM_CR_VALID_MASK;
if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);
svm->nested.vm_cr_msr &= ~chg_mask;
svm->nested.vm_cr_msr |= (data & chg_mask);
svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;
/* check for svm_disable while efer.svme is set */
if (svm_dis && (vcpu->arch.efer & EFER_SVME))
return 1;
return 0;
}
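/*
* Note the stickiness: once SVM_VM_CR_SVM_DIS_MASK is set in
* vm_cr_msr, both the DIS and LOCK bits drop out of chg_mask, so no
* later write through this function can clear them again.
*/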
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct vcpu_svm *svm = to_svm(vcpu);
u32 ecx = msr->index;
u64 data = msr->data;
switch (ecx) {
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
case MSR_STAR:
svm->vmcb->save.star = data;
break;
#ifdef CONFIG_X86_64
case MSR_LSTAR:
svm->vmcb->save.lstar = data;
break;
case MSR_CSTAR:
svm->vmcb->save.cstar = data;
break;
case MSR_KERNEL_GS_BASE:
svm->vmcb->save.kernel_gs_base = data;
break;
case MSR_SYSCALL_MASK:
svm->vmcb->save.sfmask = data;
break;
#endif
case MSR_IA32_SYSENTER_CS:
svm->vmcb->save.sysenter_cs = data;
break;
case MSR_IA32_SYSENTER_EIP:
svm->sysenter_eip = data;
svm->vmcb->save.sysenter_eip = data;
break;
case MSR_IA32_SYSENTER_ESP:
svm->sysenter_esp = data;
svm->vmcb->save.sysenter_esp = data;
break;
case MSR_IA32_DEBUGCTLMSR:
if (!boot_cpu_has(X86_FEATURE_LBRV)) {
vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
__func__, data);
break;
}
if (data & DEBUGCTL_RESERVED_BITS)
return 1;
svm->vmcb->save.dbgctl = data;
mark_dirty(svm->vmcb, VMCB_LBR);
if (data & (1ULL<<0))
svm_enable_lbrv(svm);
else
svm_disable_lbrv(svm);
break;
case MSR_VM_HSAVE_PA:
svm->nested.hsave_msr = data;
break;
case MSR_VM_CR:
return svm_set_vm_cr(vcpu, data);
case MSR_VM_IGNNE:
vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
break;
default:
return kvm_set_msr_common(vcpu, msr);
}
return 0;
}
static int wrmsr_interception(struct vcpu_svm *svm)
{
struct msr_data msr;
u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
u64 data = kvm_read_edx_eax(&svm->vcpu);
msr.data = data;
msr.index = ecx;
msr.host_initiated = false;
svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
if (kvm_set_msr(&svm->vcpu, &msr)) {
trace_kvm_msr_write_ex(ecx, data);
kvm_inject_gp(&svm->vcpu, 0);
} else {
trace_kvm_msr_write(ecx, data);
skip_emulated_instruction(&svm->vcpu);
}
return 1;
}
static int msr_interception(struct vcpu_svm *svm)
{
if (svm->vmcb->control.exit_info_1)
return wrmsr_interception(svm);
else
return rdmsr_interception(svm);
}
static int interrupt_window_interception(struct vcpu_svm *svm)
{
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
svm_clear_vintr(svm);
svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
mark_dirty(svm->vmcb, VMCB_INTR);
++svm->vcpu.stat.irq_window_exits;
return 1;
}
static int pause_interception(struct vcpu_svm *svm)
{
kvm_vcpu_on_spin(&(svm->vcpu));
return 1;
}
static int nop_interception(struct vcpu_svm *svm)
{
skip_emulated_instruction(&(svm->vcpu));
return 1;
}
static int monitor_interception(struct vcpu_svm *svm)
{
printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
return nop_interception(svm);
}
static int mwait_interception(struct vcpu_svm *svm)
{
printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
return nop_interception(svm);
}
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_READ_CR0] = cr_interception,
[SVM_EXIT_READ_CR3] = cr_interception,
[SVM_EXIT_READ_CR4] = cr_interception,
[SVM_EXIT_READ_CR8] = cr_interception,
[SVM_EXIT_CR0_SEL_WRITE] = cr_interception,
[SVM_EXIT_WRITE_CR0] = cr_interception,
[SVM_EXIT_WRITE_CR3] = cr_interception,
[SVM_EXIT_WRITE_CR4] = cr_interception,
[SVM_EXIT_WRITE_CR8] = cr8_write_interception,
[SVM_EXIT_READ_DR0] = dr_interception,
[SVM_EXIT_READ_DR1] = dr_interception,
[SVM_EXIT_READ_DR2] = dr_interception,
[SVM_EXIT_READ_DR3] = dr_interception,
[SVM_EXIT_READ_DR4] = dr_interception,
[SVM_EXIT_READ_DR5] = dr_interception,
[SVM_EXIT_READ_DR6] = dr_interception,
[SVM_EXIT_READ_DR7] = dr_interception,
[SVM_EXIT_WRITE_DR0] = dr_interception,
[SVM_EXIT_WRITE_DR1] = dr_interception,
[SVM_EXIT_WRITE_DR2] = dr_interception,
[SVM_EXIT_WRITE_DR3] = dr_interception,
[SVM_EXIT_WRITE_DR4] = dr_interception,
[SVM_EXIT_WRITE_DR5] = dr_interception,
[SVM_EXIT_WRITE_DR6] = dr_interception,
[SVM_EXIT_WRITE_DR7] = dr_interception,
[SVM_EXIT_EXCP_BASE + DB_VECTOR] = db_interception,
[SVM_EXIT_EXCP_BASE + BP_VECTOR] = bp_interception,
[SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
[SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
[SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
[SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
[SVM_EXIT_EXCP_BASE + AC_VECTOR] = ac_interception,
[SVM_EXIT_INTR] = intr_interception,
[SVM_EXIT_NMI] = nmi_interception,
[SVM_EXIT_SMI] = nop_on_interception,
[SVM_EXIT_INIT] = nop_on_interception,
[SVM_EXIT_VINTR] = interrupt_window_interception,
[SVM_EXIT_RDPMC] = rdpmc_interception,
[SVM_EXIT_CPUID] = cpuid_interception,
[SVM_EXIT_IRET] = iret_interception,
[SVM_EXIT_INVD] = emulate_on_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
[SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
[SVM_EXIT_VMRUN] = vmrun_interception,
[SVM_EXIT_VMMCALL] = vmmcall_interception,
[SVM_EXIT_VMLOAD] = vmload_interception,
[SVM_EXIT_VMSAVE] = vmsave_interception,
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
[SVM_EXIT_WBINVD] = wbinvd_interception,
[SVM_EXIT_MONITOR] = monitor_interception,
[SVM_EXIT_MWAIT] = mwait_interception,
[SVM_EXIT_XSETBV] = xsetbv_interception,
[SVM_EXIT_NPF] = pf_interception,
[SVM_EXIT_RSM] = emulate_on_interception,
};
static void dump_vmcb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
struct vmcb_save_area *save = &svm->vmcb->save;
pr_err("VMCB Control Area:\n");
pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
pr_err("%-20s%d\n", "asid:", control->asid);
pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
pr_err("%-20s%08x\n", "int_state:", control->int_state);
pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
pr_err("%-20s%lld\n", "lbr_ctl:", control->lbr_ctl);
pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
pr_err("VMCB State Save Area:\n");
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"es:",
save->es.selector, save->es.attrib,
save->es.limit, save->es.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"cs:",
save->cs.selector, save->cs.attrib,
save->cs.limit, save->cs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ss:",
save->ss.selector, save->ss.attrib,
save->ss.limit, save->ss.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ds:",
save->ds.selector, save->ds.attrib,
save->ds.limit, save->ds.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"fs:",
save->fs.selector, save->fs.attrib,
save->fs.limit, save->fs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gs:",
save->gs.selector, save->gs.attrib,
save->gs.limit, save->gs.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"gdtr:",
save->gdtr.selector, save->gdtr.attrib,
save->gdtr.limit, save->gdtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"ldtr:",
save->ldtr.selector, save->ldtr.attrib,
save->ldtr.limit, save->ldtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"idtr:",
save->idtr.selector, save->idtr.attrib,
save->idtr.limit, save->idtr.base);
pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
"tr:",
save->tr.selector, save->tr.attrib,
save->tr.limit, save->tr.base);
pr_err("cpl: %d efer: %016llx\n",
save->cpl, save->efer);
pr_err("%-15s %016llx %-13s %016llx\n",
"cr0:", save->cr0, "cr2:", save->cr2);
pr_err("%-15s %016llx %-13s %016llx\n",
"cr3:", save->cr3, "cr4:", save->cr4);
pr_err("%-15s %016llx %-13s %016llx\n",
"dr6:", save->dr6, "dr7:", save->dr7);
pr_err("%-15s %016llx %-13s %016llx\n",
"rip:", save->rip, "rflags:", save->rflags);
pr_err("%-15s %016llx %-13s %016llx\n",
"rsp:", save->rsp, "rax:", save->rax);
pr_err("%-15s %016llx %-13s %016llx\n",
"star:", save->star, "lstar:", save->lstar);
pr_err("%-15s %016llx %-13s %016llx\n",
"cstar:", save->cstar, "sfmask:", save->sfmask);
pr_err("%-15s %016llx %-13s %016llx\n",
"kernel_gs_base:", save->kernel_gs_base,
"sysenter_cs:", save->sysenter_cs);
pr_err("%-15s %016llx %-13s %016llx\n",
"sysenter_esp:", save->sysenter_esp,
"sysenter_eip:", save->sysenter_eip);
pr_err("%-15s %016llx %-13s %016llx\n",
"gpat:", save->g_pat, "dbgctl:", save->dbgctl);
pr_err("%-15s %016llx %-13s %016llx\n",
"br_from:", save->br_from, "br_to:", save->br_to);
pr_err("%-15s %016llx %-13s %016llx\n",
"excp_from:", save->last_excp_from,
"excp_to:", save->last_excp_to);
}
static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;
*info1 = control->exit_info_1;
*info2 = control->exit_info_2;
}
static int handle_exit(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;
if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
if (unlikely(svm->nested.exit_required)) {
nested_svm_vmexit(svm);
svm->nested.exit_required = false;
return 1;
}
if (is_guest_mode(vcpu)) {
int vmexit;
trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
svm->vmcb->control.exit_info_1,
svm->vmcb->control.exit_info_2,
svm->vmcb->control.exit_int_info,
svm->vmcb->control.exit_int_info_err,
KVM_ISA_SVM);
vmexit = nested_svm_exit_special(svm);
if (vmexit == NESTED_EXIT_CONTINUE)
vmexit = nested_svm_exit_handled(svm);
if (vmexit == NESTED_EXIT_DONE)
return 1;
}
svm_complete_interrupts(svm);
if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
dump_vmcb(vcpu);
return 0;
}
if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
"exit_code 0x%x\n",
__func__, svm->vmcb->control.exit_int_info,
exit_code);
if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
|| !svm_exit_handlers[exit_code]) {
WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
return svm_exit_handlers[exit_code](svm);
}
static void reload_tss(struct kvm_vcpu *vcpu)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
sd->tss_desc->type = 9; /* available 32/64-bit TSS */
load_TR_desc();
}
static void pre_svm_run(struct vcpu_svm *svm)
{
int cpu = raw_smp_processor_id();
struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
/* FIXME: handle wraparound of asid_generation */
if (svm->asid_generation != sd->asid_generation)
new_asid(svm, sd);
}
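/*
* asid_generation is compared against the per-CPU value, so a vCPU
* that migrated to another physical CPU (or whose generation was aged
* by svm_flush_tlb()) picks up a fresh ASID here instead of reusing
* TLB entries that may belong to someone else.
*/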
static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
vcpu->arch.hflags |= HF_NMI_MASK;
set_intercept(svm, INTERCEPT_IRET);
++vcpu->stat.nmi_injections;
}
static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
struct vmcb_control_area *control;
control = &svm->vmcb->control;
control->int_vector = irq;
control->int_ctl &= ~V_INTR_PRIO_MASK;
control->int_ctl |= V_IRQ_MASK |
((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
mark_dirty(svm->vmcb, VMCB_INTR);
}
static void svm_set_irq(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
BUG_ON(!(gif_set(svm)));
trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
++vcpu->stat.irq_injections;
svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}
static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
if (irr == -1)
return;
if (tpr >= irr)
set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}
static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
return;
}
static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
{
return 0;
}
static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu)
{
return;
}
static void svm_sync_pir_to_irr(struct kvm_vcpu *vcpu)
{
return;
}
static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
int ret;
ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
!(svm->vcpu.arch.hflags & HF_NMI_MASK);
ret = ret && gif_set(svm) && nested_svm_nmi(svm);
return ret;
}
static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}
static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (masked) {
svm->vcpu.arch.hflags |= HF_NMI_MASK;
set_intercept(svm, INTERCEPT_IRET);
} else {
svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
clr_intercept(svm, INTERCEPT_IRET);
}
}
static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb *vmcb = svm->vmcb;
int ret;
if (!gif_set(svm) ||
(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
return 0;
ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
if (is_guest_mode(vcpu))
return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
return ret;
}
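/*
* SVM has no dedicated "IRQ window" exit. Instead, enable_irq_window()
* below injects a fake virtual interrupt at the highest priority (the
* hardcoded 0xf in svm_inject_irq()) and arms the VINTR intercept,
* which fires as soon as the guest can accept interrupts again.
*/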
static void enable_irq_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
/*
* If GIF=0 we can't rely on the CPU to tell us when GIF becomes 1,
* because that's a separate STGI/VMRUN intercept. The next time we
* get that intercept, this function will be called again and we'll
* request the VINTR intercept then.
*/
if (gif_set(svm) && nested_svm_intr(svm)) {
svm_set_vintr(svm);
svm_inject_irq(svm, 0x0);
}
}
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
== HF_NMI_MASK)
return; /* IRET will cause a vm exit */
/*
* Something prevents NMI from being injected. Single-step over the
* problem (IRET, exception injection, or interrupt shadow).
*/
svm->nmi_singlestep = true;
svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
update_db_bp_intercept(vcpu);
}
static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
{
return 0;
}
static void svm_flush_tlb(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
else
svm->asid_generation--;
}
static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
{
}
static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
}
}
static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
u64 cr8;
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
cr8 = kvm_get_cr8(vcpu);
svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}
static void svm_complete_interrupts(struct vcpu_svm *svm)
{
u8 vector;
int type;
u32 exitintinfo = svm->vmcb->control.exit_int_info;
unsigned int3_injected = svm->int3_injected;
svm->int3_injected = 0;
/*
* If we've made progress since setting HF_IRET_MASK, we've
* executed an IRET and can allow NMI injection.
*/
if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
&& kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
}
svm->vcpu.arch.nmi_injected = false;
kvm_clear_exception_queue(&svm->vcpu);
kvm_clear_interrupt_queue(&svm->vcpu);
if (!(exitintinfo & SVM_EXITINTINFO_VALID))
return;
kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
switch (type) {
case SVM_EXITINTINFO_TYPE_NMI:
svm->vcpu.arch.nmi_injected = true;
break;
case SVM_EXITINTINFO_TYPE_EXEPT:
/*
* In case of software exceptions, do not reinject the vector,
* but re-execute the instruction instead. Rewind RIP first
* if we emulated INT3 before.
*/
if (kvm_exception_is_soft(vector)) {
if (vector == BP_VECTOR && int3_injected &&
kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
kvm_rip_write(&svm->vcpu,
kvm_rip_read(&svm->vcpu) -
int3_injected);
break;
}
if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
u32 err = svm->vmcb->control.exit_int_info_err;
kvm_requeue_exception_e(&svm->vcpu, vector, err);
} else
kvm_requeue_exception(&svm->vcpu, vector);
break;
case SVM_EXITINTINFO_TYPE_INTR:
kvm_queue_interrupt(&svm->vcpu, vector, false);
break;
default:
break;
}
}
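/*
* Called when an injection that was already programmed must be backed
* out before VMRUN, e.g. because an immediate exit was requested.
* Folding event_inj back into exit_int_info lets
* svm_complete_interrupts() requeue the event exactly as if it had
* been cut short by a #VMEXIT.
*/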
static void svm_cancel_injection(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
struct vmcb_control_area *control = &svm->vmcb->control;
control->exit_int_info = control->event_inj;
control->exit_int_info_err = control->event_inj_err;
control->event_inj = 0;
svm_complete_interrupts(svm);
}
static void svm_vcpu_run(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
/*
* A vmexit emulation is required before the vcpu can be executed
* again.
*/
if (unlikely(svm->nested.exit_required))
return;
pre_svm_run(svm);
sync_lapic_to_cr8(vcpu);
svm->vmcb->save.cr2 = vcpu->arch.cr2;
clgi();
local_irq_enable();
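/*
* Host IRQs may be enabled here because GIF is still clear (see the
* clgi() above): maskable interrupts are held pending while GIF=0,
* so the earliest they can take effect is as a #VMEXIT(INTR) once
* VMRUN sets GIF for the guest.
*/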
asm volatile (
"push %%" _ASM_BP "; \n\t"
"mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
"mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
"mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
"mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
"mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
"mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
"mov %c[r8](%[svm]), %%r8 \n\t"
"mov %c[r9](%[svm]), %%r9 \n\t"
"mov %c[r10](%[svm]), %%r10 \n\t"
"mov %c[r11](%[svm]), %%r11 \n\t"
"mov %c[r12](%[svm]), %%r12 \n\t"
"mov %c[r13](%[svm]), %%r13 \n\t"
"mov %c[r14](%[svm]), %%r14 \n\t"
"mov %c[r15](%[svm]), %%r15 \n\t"
#endif
/* Enter guest mode */
"push %%" _ASM_AX " \n\t"
"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
__ex(SVM_VMLOAD) "\n\t"
__ex(SVM_VMRUN) "\n\t"
__ex(SVM_VMSAVE) "\n\t"
"pop %%" _ASM_AX " \n\t"
/* Save guest registers, load host registers */
"mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
"mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
"mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
"mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
"mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
"mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
#ifdef CONFIG_X86_64
"mov %%r8, %c[r8](%[svm]) \n\t"
"mov %%r9, %c[r9](%[svm]) \n\t"
"mov %%r10, %c[r10](%[svm]) \n\t"
"mov %%r11, %c[r11](%[svm]) \n\t"
"mov %%r12, %c[r12](%[svm]) \n\t"
"mov %%r13, %c[r13](%[svm]) \n\t"
"mov %%r14, %c[r14](%[svm]) \n\t"
"mov %%r15, %c[r15](%[svm]) \n\t"
#endif
"pop %%" _ASM_BP
:
: [svm]"a"(svm),
[vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
[rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
[rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
[rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
[rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
[rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
[rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
#ifdef CONFIG_X86_64
, [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
[r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
[r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
[r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
[r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
[r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
[r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
[r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
#endif
: "cc", "memory"
#ifdef CONFIG_X86_64
, "rbx", "rcx", "rdx", "rsi", "rdi"
, "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
#else
, "ebx", "ecx", "edx", "esi", "edi"
#endif
);
#ifdef CONFIG_X86_64
wrmsrl(MSR_GS_BASE, svm->host.gs_base);
#else
loadsegment(fs, svm->host.fs);
#ifndef CONFIG_X86_32_LAZY_GS
loadsegment(gs, svm->host.gs);
#endif
#endif
reload_tss(vcpu);
local_irq_disable();
vcpu->arch.cr2 = svm->vmcb->save.cr2;
vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_handle_nmi(&svm->vcpu);
stgi();
/* Any pending NMI will be delivered here, now that GIF is set */
if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_after_handle_nmi(&svm->vcpu);
sync_cr8_to_lapic(vcpu);
svm->next_rip = 0;
svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
/* If the exit was due to a #PF, check for an async page fault reason */
if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
svm->apf_reason = kvm_read_and_reset_pf_reason();
if (npt_enabled) {
vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
}
/*
* We need to handle MC intercepts here before the vcpu has a chance to
* change the physical cpu
*/
if (unlikely(svm->vmcb->control.exit_code ==
SVM_EXIT_EXCP_BASE + MC_VECTOR))
svm_handle_mce(svm);
mark_all_clean(svm->vmcb);
}
static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->save.cr3 = root;
mark_dirty(svm->vmcb, VMCB_CR);
svm_flush_tlb(vcpu);
}
static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
struct vcpu_svm *svm = to_svm(vcpu);
svm->vmcb->control.nested_cr3 = root;
mark_dirty(svm->vmcb, VMCB_NPT);
/* Also sync guest cr3 here in case we live migrate */
svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
mark_dirty(svm->vmcb, VMCB_CR);
svm_flush_tlb(vcpu);
}
static int is_disabled(void)
{
u64 vm_cr;
rdmsrl(MSR_VM_CR, vm_cr);
if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
return 1;
return 0;
}
static void
svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
{
/*
* Patch in the VMMCALL instruction:
*/
hypercall[0] = 0x0f;
hypercall[1] = 0x01;
hypercall[2] = 0xd9;
}
static void svm_check_processor_compat(void *rtn)
{
*(int *)rtn = 0;
}
static bool svm_cpu_has_accelerated_tpr(void)
{
return false;
}
static bool svm_has_high_real_mode_segbase(void)
{
return true;
}
static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
{
return 0;
}
static void svm_cpuid_update(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
/* Update nrips enabled cache */
svm->nrips_enabled = !!guest_cpuid_has_nrips(&svm->vcpu);
}
static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
{
switch (func) {
case 0x80000001:
if (nested)
entry->ecx |= (1 << 2); /* Set SVM bit */
break;
case 0x8000000A:
entry->eax = 1; /* SVM revision 1 */
entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
ASID emulation to nested SVM */
entry->ecx = 0; /* Reserved */
entry->edx = 0; /* By default do not support any
additional features */
/* Support next_rip if host supports it */
if (boot_cpu_has(X86_FEATURE_NRIPS))
entry->edx |= SVM_FEATURE_NRIP;
/* Support NPT for the guest if enabled */
if (npt_enabled)
entry->edx |= SVM_FEATURE_NPT;
break;
}
}
static int svm_get_lpage_level(void)
{
return PT_PDPE_LEVEL;
}
static bool svm_rdtscp_supported(void)
{
return false;
}
static bool svm_invpcid_supported(void)
{
return false;
}
static bool svm_mpx_supported(void)
{
return false;
}
static bool svm_xsaves_supported(void)
{
return false;
}
static bool svm_has_wbinvd_exit(void)
{
return true;
}
static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
set_exception_intercept(svm, NM_VECTOR);
update_cr0_intercept(svm);
}
#define PRE_EX(exit) { .exit_code = (exit), \
.stage = X86_ICPT_PRE_EXCEPT, }
#define POST_EX(exit) { .exit_code = (exit), \
.stage = X86_ICPT_POST_EXCEPT, }
#define POST_MEM(exit) { .exit_code = (exit), \
.stage = X86_ICPT_POST_MEMACCESS, }
static const struct __x86_intercept {
u32 exit_code;
enum x86_intercept_stage stage;
} x86_intercept_map[] = {
[x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
[x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
[x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
[x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
[x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
[x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
[x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
[x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
[x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
[x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
[x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
[x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
[x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
[x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
[x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
[x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
[x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
[x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
[x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
[x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
[x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
[x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
[x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
[x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
[x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
[x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
[x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
[x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
[x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
[x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
[x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
[x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
[x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
[x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
[x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
[x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
[x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
[x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
[x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
[x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
[x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
[x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
};
#undef PRE_EX
#undef POST_EX
#undef POST_MEM
static int svm_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
{
struct vcpu_svm *svm = to_svm(vcpu);
int vmexit, ret = X86EMUL_CONTINUE;
struct __x86_intercept icpt_info;
struct vmcb *vmcb = svm->vmcb;
if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
goto out;
icpt_info = x86_intercept_map[info->intercept];
if (stage != icpt_info.stage)
goto out;
switch (icpt_info.exit_code) {
case SVM_EXIT_READ_CR0:
if (info->intercept == x86_intercept_cr_read)
icpt_info.exit_code += info->modrm_reg;
break;
case SVM_EXIT_WRITE_CR0: {
unsigned long cr0, val;
u64 intercept;
if (info->intercept == x86_intercept_cr_write)
icpt_info.exit_code += info->modrm_reg;
if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
info->intercept == x86_intercept_clts)
break;
intercept = svm->nested.intercept;
if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
break;
cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
if (info->intercept == x86_intercept_lmsw) {
cr0 &= 0xfUL;
val &= 0xfUL;
/* lmsw can't clear PE - catch this here */
if (cr0 & X86_CR0_PE)
val |= X86_CR0_PE;
}
if (cr0 ^ val)
icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
break;
}
case SVM_EXIT_READ_DR0:
case SVM_EXIT_WRITE_DR0:
icpt_info.exit_code += info->modrm_reg;
break;
case SVM_EXIT_MSR:
if (info->intercept == x86_intercept_wrmsr)
vmcb->control.exit_info_1 = 1;
else
vmcb->control.exit_info_1 = 0;
break;
case SVM_EXIT_PAUSE:
/*
* We only get this intercept for PAUSE, which is NOP with a REP
* prefix; plain NOP is not intercepted. Check the prefix here and,
* for an actual PAUSE, fall out to the common exit handling below
* instead of falling through into the IOIO case.
*/
if (info->rep_prefix != REPE_PREFIX)
goto out;
break;
case SVM_EXIT_IOIO: {
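/*
* Build exit_info_1 the way hardware encodes an IOIO intercept: port
* in bits 31:16, TYPE set for IN, STR for string variants, REP for a
* rep prefix, a one-hot operand size in the SZ field, and a one-hot
* address size in the ASZ field - ad_bytes is 2/4/8, so shifting by
* SVM_IOIO_ASIZE_SHIFT - 1 lands it directly on the A16/A32/A64 bit.
*/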
u64 exit_info;
u32 bytes;
if (info->intercept == x86_intercept_in ||
info->intercept == x86_intercept_ins) {
exit_info = ((info->src_val & 0xffff) << 16) |
SVM_IOIO_TYPE_MASK;
bytes = info->dst_bytes;
} else {
exit_info = (info->dst_val & 0xffff) << 16;
bytes = info->src_bytes;
}
if (info->intercept == x86_intercept_outs ||
info->intercept == x86_intercept_ins)
exit_info |= SVM_IOIO_STR_MASK;
if (info->rep_prefix)
exit_info |= SVM_IOIO_REP_MASK;
bytes = min(bytes, 4u);
exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
vmcb->control.exit_info_1 = exit_info;
vmcb->control.exit_info_2 = info->next_rip;
break;
}
default:
break;
}
/* TODO: Advertise NRIPS to guest hypervisor unconditionally */
if (static_cpu_has(X86_FEATURE_NRIPS))
vmcb->control.next_rip = info->next_rip;
vmcb->control.exit_code = icpt_info.exit_code;
vmexit = nested_svm_exit_handled(svm);
ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
: X86EMUL_CONTINUE;
out:
return ret;
}
static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
{
local_irq_enable();
}
static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
{
}
static struct kvm_x86_ops svm_x86_ops = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
.hardware_unsetup = svm_hardware_unsetup,
.check_processor_compatibility = svm_check_processor_compat,
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
.vcpu_create = svm_create_vcpu,
.vcpu_free = svm_free_vcpu,
.vcpu_reset = svm_vcpu_reset,
.prepare_guest_switch = svm_prepare_guest_switch,
.vcpu_load = svm_vcpu_load,
.vcpu_put = svm_vcpu_put,
.update_db_bp_intercept = update_db_bp_intercept,
.get_msr = svm_get_msr,
.set_msr = svm_set_msr,
.get_segment_base = svm_get_segment_base,
.get_segment = svm_get_segment,
.set_segment = svm_set_segment,
.get_cpl = svm_get_cpl,
.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
.decache_cr3 = svm_decache_cr3,
.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
.set_cr0 = svm_set_cr0,
.set_cr3 = svm_set_cr3,
.set_cr4 = svm_set_cr4,
.set_efer = svm_set_efer,
.get_idt = svm_get_idt,
.set_idt = svm_set_idt,
.get_gdt = svm_get_gdt,
.set_gdt = svm_set_gdt,
.get_dr6 = svm_get_dr6,
.set_dr6 = svm_set_dr6,
.set_dr7 = svm_set_dr7,
.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
.cache_reg = svm_cache_reg,
.get_rflags = svm_get_rflags,
.set_rflags = svm_set_rflags,
.fpu_activate = svm_fpu_activate,
.fpu_deactivate = svm_fpu_deactivate,
.tlb_flush = svm_flush_tlb,
.run = svm_vcpu_run,
.handle_exit = handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
.set_interrupt_shadow = svm_set_interrupt_shadow,
.get_interrupt_shadow = svm_get_interrupt_shadow,
.patch_hypercall = svm_patch_hypercall,
.set_irq = svm_set_irq,
.set_nmi = svm_inject_nmi,
.queue_exception = svm_queue_exception,
.cancel_injection = svm_cancel_injection,
.interrupt_allowed = svm_interrupt_allowed,
.nmi_allowed = svm_nmi_allowed,
.get_nmi_mask = svm_get_nmi_mask,
.set_nmi_mask = svm_set_nmi_mask,
.enable_nmi_window = enable_nmi_window,
.enable_irq_window = enable_irq_window,
.update_cr8_intercept = update_cr8_intercept,
.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
.cpu_uses_apicv = svm_cpu_uses_apicv,
.load_eoi_exitmap = svm_load_eoi_exitmap,
.sync_pir_to_irr = svm_sync_pir_to_irr,
.set_tss_addr = svm_set_tss_addr,
.get_tdp_level = get_npt_level,
.get_mt_mask = svm_get_mt_mask,
.get_exit_info = svm_get_exit_info,
.get_lpage_level = svm_get_lpage_level,
.cpuid_update = svm_cpuid_update,
.rdtscp_supported = svm_rdtscp_supported,
.invpcid_supported = svm_invpcid_supported,
.mpx_supported = svm_mpx_supported,
.xsaves_supported = svm_xsaves_supported,
.set_supported_cpuid = svm_set_supported_cpuid,
.has_wbinvd_exit = svm_has_wbinvd_exit,
.read_tsc_offset = svm_read_tsc_offset,
.write_tsc_offset = svm_write_tsc_offset,
.adjust_tsc_offset_guest = svm_adjust_tsc_offset_guest,
.read_l1_tsc = svm_read_l1_tsc,
.set_tdp_cr3 = set_tdp_cr3,
.check_intercept = svm_check_intercept,
.handle_external_intr = svm_handle_external_intr,
.sched_in = svm_sched_in,
.pmu_ops = &amd_pmu_ops,
};
static int __init svm_init(void)
{
return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
__alignof__(struct vcpu_svm), THIS_MODULE);
}
static void __exit svm_exit(void)
{
kvm_exit();
}
module_init(svm_init)
module_exit(svm_exit)
/* ./CrossVul/dataset_final_sorted/CWE-399/c/bad_1774_0 */
/* crossvul-cpp_data_bad_5721_2 */
/*
* UDP over IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/ipv4/udp.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
* Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
* a single port at the same time.
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
* YOSHIFUJI Hideaki @USAGI: convert /proc/net/udp6 to seq_file.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/raw.h>
#include <net/tcp_states.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/inet6_hashtables.h>
#include <net/ll_poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <trace/events/skb.h>
#include "udp_impl.h"
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
/* if both are mapped, treat as IPv4 */
if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED)
return (!sk2_ipv6only &&
(!sk1_rcv_saddr || !sk2_rcv_saddr ||
sk1_rcv_saddr == sk2_rcv_saddr));
if (addr_type2 == IPV6_ADDR_ANY &&
!(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
!(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
return 1;
if (sk2_rcv_saddr6 &&
ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
return 1;
return 0;
}
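/*
* Summarizing the checks above (returning 1 means "conflict"):
* - both addresses v4-mapped: fall back to the IPv4 rule, i.e.
* conflict unless sk2 is v6-only or both v4 addresses are nonzero
* and different;
* - either address is in6addr_any: conflict unless the wildcard
* socket is v6-only and the other one is v4-mapped;
* - otherwise: conflict only on an exact IPv6 address match.
*/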
static unsigned int udp6_portaddr_hash(struct net *net,
const struct in6_addr *addr6,
unsigned int port)
{
unsigned int hash, mix = net_hash_mix(net);
if (ipv6_addr_any(addr6))
hash = jhash_1word(0, mix);
else if (ipv6_addr_v4mapped(addr6))
hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
else
hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
return hash ^ port;
}
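/*
* For example, an IPv4-mapped address (::ffff:a.b.c.d) deliberately
* hashes only its low 32 bits, so the socket lands in the same hash2
* slot as an AF_INET socket bound to a.b.c.d on the same port.
*/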
int udp_v6_get_port(struct sock *sk, unsigned short snum)
{
unsigned int hash2_nulladdr =
udp6_portaddr_hash(sock_net(sk), &in6addr_any, snum);
unsigned int hash2_partial =
udp6_portaddr_hash(sock_net(sk), &inet6_sk(sk)->rcv_saddr, 0);
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
{
u16 new_hash = udp6_portaddr_hash(sock_net(sk),
&inet6_sk(sk)->rcv_saddr,
inet_sk(sk)->inet_num);
udp_lib_rehash(sk, new_hash);
}
static inline int compute_score(struct sock *sk, struct net *net,
unsigned short hnum,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
score = 0;
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
return -1;
score++;
}
if (!ipv6_addr_any(&np->daddr)) {
if (!ipv6_addr_equal(&np->daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
}
return score;
}
#define SCORE2_MAX (1 + 1 + 1)
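/*
* compute_score2() runs on the address+port chains, where the local
* address must already match exactly, so its best possible score is
* connected dport + connected daddr + bound device = SCORE2_MAX.
* Reaching it in udp6_lib_lookup2() ends the scan early (exact_match).
*/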
static inline int compute_score2(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
int dif)
{
int score = -1;
if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum &&
sk->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
return -1;
score = 0;
if (inet->inet_dport) {
if (inet->inet_dport != sport)
return -1;
score++;
}
if (!ipv6_addr_any(&np->daddr)) {
if (!ipv6_addr_equal(&np->daddr, saddr))
return -1;
score++;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
}
}
return score;
}
/* called with rcu_read_lock() held */
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
struct udp_hslot *hslot2, unsigned int slot2)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
begin:
result = NULL;
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
score = compute_score2(sk, net, saddr, sport,
daddr, hnum, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = inet6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
} else if (score == SCORE2_MAX)
goto exact_match;
} else if (score == badness && reuseport) {
matches++;
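/*
* Reservoir sampling over equal-score reuseport sockets: hash is
* roughly uniform in [0, 2^32), so ((u64)hash * matches) >> 32 == 0
* holds with probability about 1/matches, keeping each of the first
* k candidates with probability 1/k.
*/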
if (((u64)hash * matches) >> 32 == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* If the nulls value we got at the end of this lookup is not the
* expected one, we must restart the lookup: we probably hit an item
* that was moved to another chain.
*/
if (get_nulls_value(node) != slot2)
goto begin;
if (result) {
exact_match:
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score2(result, net, saddr, sport,
daddr, hnum, dif) < badness)) {
sock_put(result);
goto begin;
}
}
return result;
}
struct sock *__udp6_lib_lookup(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport,
int dif, struct udp_table *udptable)
{
struct sock *sk, *result;
struct hlist_nulls_node *node;
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
rcu_read_lock();
if (hslot->count > 10) {
hash2 = udp6_portaddr_hash(net, daddr, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
hslot2, slot2);
if (!result) {
hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum);
slot2 = hash2 & udptable->mask;
hslot2 = &udptable->hash2[slot2];
if (hslot->count < hslot2->count)
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
&in6addr_any, hnum, dif,
hslot2, slot2);
}
rcu_read_unlock();
return result;
}
begin:
result = NULL;
badness = -1;
sk_nulls_for_each_rcu(sk, node, &hslot->head) {
score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif);
if (score > badness) {
result = sk;
badness = score;
reuseport = sk->sk_reuseport;
if (reuseport) {
hash = inet6_ehashfn(net, daddr, hnum,
saddr, sport);
matches = 1;
}
} else if (score == badness && reuseport) {
matches++;
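/* same 1/matches reservoir pick as in udp6_lib_lookup2() */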
if (((u64)hash * matches) >> 32 == 0)
result = sk;
hash = next_pseudo_random32(hash);
}
}
/*
* If the nulls value we got at the end of this lookup is not the
* expected one, we must restart the lookup: we probably hit an item
* that was moved to another chain.
*/
if (get_nulls_value(node) != slot)
goto begin;
if (result) {
if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
result = NULL;
else if (unlikely(compute_score(result, net, hnum, saddr, sport,
daddr, dport, dif) < badness)) {
sock_put(result);
goto begin;
}
}
rcu_read_unlock();
return result;
}
EXPORT_SYMBOL_GPL(__udp6_lib_lookup);
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
struct sock *sk;
const struct ipv6hdr *iph = ipv6_hdr(skb);
if (unlikely(sk = skb_steal_sock(skb)))
return sk;
return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport,
&iph->daddr, dport, inet6_iif(skb),
udptable);
}
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, __be16 dport, int dif)
{
return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
EXPORT_SYMBOL_GPL(udp6_lib_lookup);
/*
* This should be easy: if there is something there we return it,
* otherwise we block.
*/
int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
unsigned int ulen, copied;
int peeked, off = 0;
int err;
int is_udplite = IS_UDPLITE(sk);
int is_udp4;
bool slow;
if (addr_len)
*addr_len = sizeof(struct sockaddr_in6);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len);
try_again:
skb = __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
&peeked, &off, &err);
if (!skb)
goto out;
ulen = skb->len - sizeof(struct udphdr);
copied = len;
if (copied > ulen)
copied = ulen;
else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
/*
* If checksum is needed at all, try to do it while copying the
* data. If the data is truncated, or if we only want a partial
* coverage checksum (UDP-Lite), do it before the copy.
*/
if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (unlikely(err)) {
trace_kfree_skb(skb, udpv6_recvmsg);
if (!peeked) {
atomic_inc(&sk->sk_drops);
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS,
is_udplite);
}
goto out_free;
}
if (!peeked) {
if (is_udp4)
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INDATAGRAMS, is_udplite);
}
sock_recv_ts_and_drops(msg, sk, skb);
/* Copy the address. */
if (msg->msg_name) {
struct sockaddr_in6 *sin6;
sin6 = (struct sockaddr_in6 *) msg->msg_name;
sin6->sin6_family = AF_INET6;
sin6->sin6_port = udp_hdr(skb)->source;
sin6->sin6_flowinfo = 0;
if (is_udp4) {
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin6->sin6_addr);
sin6->sin6_scope_id = 0;
} else {
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_scope_id =
ipv6_iface_scope_id(&sin6->sin6_addr,
IP6CB(skb)->iif);
}
}
if (is_udp4) {
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
} else {
if (np->rxopt.all)
ip6_datagram_recv_ctl(sk, msg, skb);
}
err = copied;
if (flags & MSG_TRUNC)
err = ulen;
out_free:
skb_free_datagram_locked(sk, skb);
out:
return err;
csum_copy_err:
slow = lock_sock_fast(sk);
if (!skb_kill_datagram(sk, skb, flags)) {
if (is_udp4) {
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
} else {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_CSUMERRORS, is_udplite);
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_INERRORS, is_udplite);
}
}
unlock_sock_fast(sk, slow);
if (noblock)
return -EAGAIN;
/* starting over for a new packet */
msg->msg_flags &= ~MSG_TRUNC;
goto try_again;
}
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info,
struct udp_table *udptable)
{
struct ipv6_pinfo *np;
const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
const struct in6_addr *saddr = &hdr->saddr;
const struct in6_addr *daddr = &hdr->daddr;
struct udphdr *uh = (struct udphdr*)(skb->data+offset);
struct sock *sk;
int err;
sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
saddr, uh->source, inet6_iif(skb), udptable);
if (sk == NULL)
return;
if (type == ICMPV6_PKT_TOOBIG)
ip6_sk_update_pmtu(skb, sk, info);
if (type == NDISC_REDIRECT)
ip6_sk_redirect(skb, sk);
np = inet6_sk(sk);
if (!icmpv6_err_convert(type, code, &err) && !np->recverr)
goto out;
if (sk->sk_state != TCP_ESTABLISHED && !np->recverr)
goto out;
if (np->recverr)
ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1));
sk->sk_err = err;
sk->sk_error_report(sk);
out:
sock_put(sk);
}
static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
int rc;
if (!ipv6_addr_any(&inet6_sk(sk)->daddr))
sock_rps_save_rxhash(sk, skb);
rc = sock_queue_rcv_skb(sk, skb);
if (rc < 0) {
int is_udplite = IS_UDPLITE(sk);
/* Note that an ENOMEM error is charged twice */
if (rc == -ENOMEM)
UDP6_INC_STATS_BH(sock_net(sk),
UDP_MIB_RCVBUFERRORS, is_udplite);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
kfree_skb(skb);
return -1;
}
return 0;
}
static inline void udpv6_err(struct sk_buff *skb,
			     struct inet6_skb_parm *opt, u8 type,
			     u8 code, int offset, __be32 info)
{
__udp6_lib_err(skb, opt, type, code, offset, info, &udp_table);
}
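/*
 * Static key gating the encapsulation path in udpv6_queue_rcv_skb();
 * the branch stays patched out until the first encapsulation socket is
 * enabled.
 */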
static struct static_key udpv6_encap_needed __read_mostly;
void udpv6_encap_enable(void)
{
if (!static_key_enabled(&udpv6_encap_needed))
static_key_slow_inc(&udpv6_encap_needed);
}
EXPORT_SYMBOL(udpv6_encap_enable);
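/*
 * Per-socket receive path: XFRM policy check, optional encapsulation
 * hook, UDP-Lite coverage checks, then delivery to the receive queue
 * or, if the socket is owned by user context, to the backlog.
 * Returns 0 on success, -1 on drop, or a positive protocol number if
 * an encapsulation handler asked for the packet to be resubmitted.
 */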
int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct udp_sock *up = udp_sk(sk);
int rc;
int is_udplite = IS_UDPLITE(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto drop;
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
/*
* This is an encapsulation socket so pass the skb to
* the socket's udp_encap_rcv() hook. Otherwise, just
* fall through and pass this up the UDP socket.
* up->encap_rcv() returns the following value:
* =0 if skb was successfully passed to the encap
* handler or was discarded by it.
* >0 if skb should be passed on to UDP.
* <0 if skb should be resubmitted as proto -N
*/
/* if we're overly short, let UDP handle it */
encap_rcv = ACCESS_ONCE(up->encap_rcv);
if (skb->len > sizeof(struct udphdr) && encap_rcv != NULL) {
int ret;
ret = encap_rcv(sk, skb);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
is_udplite);
return -ret;
}
}
/* FALLTHROUGH -- it's a UDP Packet */
}
/*
* UDP-Lite specific tests, ignored on UDP sockets (see net/ipv4/udp.c).
*/
if ((is_udplite & UDPLITE_RECV_CC) && UDP_SKB_CB(skb)->partial_cov) {
if (up->pcrlen == 0) { /* full coverage was set */
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: partial coverage"
" %d while full coverage %d requested\n",
UDP_SKB_CB(skb)->cscov, skb->len);
goto drop;
}
if (UDP_SKB_CB(skb)->cscov < up->pcrlen) {
LIMIT_NETDEBUG(KERN_WARNING "UDPLITE6: coverage %d "
"too small, need min %d\n",
UDP_SKB_CB(skb)->cscov, up->pcrlen);
goto drop;
}
}
if (rcu_access_pointer(sk->sk_filter)) {
if (udp_lib_checksum_complete(skb))
goto csum_error;
}
if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
goto drop;
skb_dst_drop(skb);
bh_lock_sock(sk);
rc = 0;
if (!sock_owned_by_user(sk))
rc = __udpv6_queue_rcv_skb(sk, skb);
else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
bh_unlock_sock(sk);
goto drop;
}
bh_unlock_sock(sk);
return rc;
csum_error:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return -1;
}
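/*
 * Starting at @sk, walk the hash chain and return the next socket that
 * should receive this multicast datagram: same netns, matching local
 * port and family, and - wherever the socket constrains them - matching
 * peer port and address, bound device, local address and multicast
 * group membership.
 */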
static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
__be16 loc_port, const struct in6_addr *loc_addr,
__be16 rmt_port, const struct in6_addr *rmt_addr,
int dif)
{
struct hlist_nulls_node *node;
struct sock *s = sk;
unsigned short num = ntohs(loc_port);
sk_nulls_for_each_from(s, node) {
struct inet_sock *inet = inet_sk(s);
if (!net_eq(sock_net(s), net))
continue;
if (udp_sk(s)->udp_port_hash == num &&
s->sk_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(s);
if (inet->inet_dport) {
if (inet->inet_dport != rmt_port)
continue;
}
if (!ipv6_addr_any(&np->daddr) &&
!ipv6_addr_equal(&np->daddr, rmt_addr))
continue;
if (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (!ipv6_addr_equal(&np->rcv_saddr, loc_addr))
continue;
}
if (!inet6_mc_check(s, loc_addr, rmt_addr))
continue;
return s;
}
}
return NULL;
}
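/*
 * Deliver the skb to every socket gathered in @stack: the socket at
 * index @final (if any) gets the original skb, all the others get
 * clones. A failed clone is charged to that socket's drop statistics.
 */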
static void flush_stack(struct sock **stack, unsigned int count,
struct sk_buff *skb, unsigned int final)
{
struct sk_buff *skb1 = NULL;
struct sock *sk;
unsigned int i;
for (i = 0; i < count; i++) {
sk = stack[i];
if (likely(skb1 == NULL))
skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
if (!skb1) {
atomic_inc(&sk->sk_drops);
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
IS_UDPLITE(sk));
UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
IS_UDPLITE(sk));
}
if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
skb1 = NULL;
}
if (unlikely(skb1))
kfree_skb(skb1);
}
/*
* Note: called only from the BH handler context,
* so we don't need to lock the hashes.
*/
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
const struct in6_addr *saddr, const struct in6_addr *daddr,
struct udp_table *udptable)
{
struct sock *sk, *stack[256 / sizeof(struct sock *)];
const struct udphdr *uh = udp_hdr(skb);
struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
int dif;
unsigned int i, count = 0;
spin_lock(&hslot->lock);
sk = sk_nulls_head(&hslot->head);
dif = inet6_iif(skb);
sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
while (sk) {
stack[count++] = sk;
sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
uh->source, saddr, dif);
if (unlikely(count == ARRAY_SIZE(stack))) {
if (!sk)
break;
flush_stack(stack, count, skb, ~0);
count = 0;
}
}
	/*
	 * Take a reference on each socket before releasing the lock, so
	 * none of them can disappear while we deliver outside of it.
	 */
for (i = 0; i < count; i++)
sock_hold(stack[i]);
spin_unlock(&hslot->lock);
if (count) {
flush_stack(stack, count, skb, count - 1);
for (i = 0; i < count; i++)
sock_put(stack[i]);
} else {
kfree_skb(skb);
}
return 0;
}
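/*
 * Common receive entry point for UDP and UDP-Lite: validate the header
 * and length, initialise the checksum state, then hand the packet to
 * the multicast deliver loop or the single matching unicast socket.
 * If no socket is found, verify the checksum and answer with an ICMPv6
 * port unreachable.
 */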
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
int proto)
{
struct net *net = dev_net(skb->dev);
struct sock *sk;
struct udphdr *uh;
const struct in6_addr *saddr, *daddr;
u32 ulen = 0;
if (!pskb_may_pull(skb, sizeof(struct udphdr)))
goto discard;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
ulen = ntohs(uh->len);
if (ulen > skb->len)
goto short_packet;
if (proto == IPPROTO_UDP) {
/* UDP validates ulen. */
/* Check for jumbo payload */
if (ulen == 0)
ulen = skb->len;
if (ulen < sizeof(*uh))
goto short_packet;
if (ulen < skb->len) {
if (pskb_trim_rcsum(skb, ulen))
goto short_packet;
saddr = &ipv6_hdr(skb)->saddr;
daddr = &ipv6_hdr(skb)->daddr;
uh = udp_hdr(skb);
}
}
if (udp6_csum_init(skb, uh, proto))
goto csum_error;
/*
* Multicast receive code
*/
if (ipv6_addr_is_multicast(daddr))
return __udp6_lib_mcast_deliver(net, skb,
saddr, daddr, udptable);
/* Unicast */
/*
* check socket cache ... must talk to Alan about his plans
* for sock caches... i'll skip this for now.
*/
sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
if (sk != NULL) {
int ret;
sk_mark_ll(sk, skb);
ret = udpv6_queue_rcv_skb(sk, skb);
sock_put(sk);
		/* a return value > 0 means the input must be resubmitted;
		 * the caller expects -protocol in that case, or 0 once the
		 * packet has been consumed
		 */
if (ret > 0)
return -ret;
return 0;
}
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
if (udp_lib_checksum_complete(skb))
goto csum_error;
UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
kfree_skb(skb);
return 0;
short_packet:
LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n",
proto == IPPROTO_UDPLITE ? "-Lite" : "",
saddr,
ntohs(uh->source),
ulen,
skb->len,
daddr,
ntohs(uh->dest));
goto discard;
csum_error:
UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
discard:
UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
kfree_skb(skb);
return 0;
}
static inline int udpv6_rcv(struct sk_buff *skb)
{
return __udp6_lib_rcv(skb, &udp_table, IPPROTO_UDP);
}
/*
* Throw away all pending data and cancel the corking. Socket is locked.
*/
static void udp_v6_flush_pending_frames(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
if (up->pending == AF_INET)
udp_flush_pending_frames(sk);
else if (up->pending) {
up->len = 0;
up->pending = 0;
ip6_flush_pending_frames(sk);
}
}
/**
 * udp6_hwcsum_outgoing - handle outgoing HW checksumming
 * @sk: socket we are sending on
 * @skb: sk_buff containing the filled-in UDP header
 * (checksum field must be zeroed out)
 * @saddr: source address of the packet
 * @daddr: destination address of the packet
 * @len: length of the UDP datagram, header included
 */
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
const struct in6_addr *saddr,
const struct in6_addr *daddr, int len)
{
unsigned int offset;
struct udphdr *uh = udp_hdr(skb);
__wsum csum = 0;
if (skb_queue_len(&sk->sk_write_queue) == 1) {
/* Only one fragment on the socket. */
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
} else {
		/*
		 * HW checksumming cannot be used when two or more
		 * fragments are queued on the socket: the individual
		 * sk_buff checksums must be folded together in software.
		 */
offset = skb_transport_offset(skb);
skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
skb->ip_summed = CHECKSUM_NONE;
skb_queue_walk(&sk->sk_write_queue, skb) {
csum = csum_add(csum, skb->csum);
}
uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
}
}
/*
* Sending
*/
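/*
 * Write the UDP header into the first corked frame, finish the
 * checksum (software, hardware-assisted or UDP-Lite partial), and push
 * the pending queue down to IPv6.
 */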
static int udp_v6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;
	struct udphdr *uh;
	struct udp_sock *up = udp_sk(sk);
	struct inet_sock *inet = inet_sk(sk);
	struct flowi6 *fl6;
	int err = 0;
	int is_udplite = IS_UDPLITE(sk);
	__wsum csum = 0;
	/* A socket corked through the v4-mapped path carries an IPv4
	 * flow in the cork, not a flowi6, so it must be uncorked by the
	 * IPv4 side via udp_push_pending_frames().
	 */
	if (up->pending == AF_INET)
		return udp_push_pending_frames(sk);
	fl6 = &inet->cork.fl.u.ip6;
	/* Grab the skbuff where UDP header space exists. */
	if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
		goto out;
/*
* Create a UDP header
*/
uh = udp_hdr(skb);
uh->source = fl6->fl6_sport;
uh->dest = fl6->fl6_dport;
uh->len = htons(up->len);
uh->check = 0;
if (is_udplite)
csum = udplite_csum_outgoing(sk, skb);
else if (skb->ip_summed == CHECKSUM_PARTIAL) { /* UDP hardware csum */
udp6_hwcsum_outgoing(sk, skb, &fl6->saddr, &fl6->daddr,
up->len);
goto send;
} else
csum = udp_csum_outgoing(sk, skb);
/* add protocol-dependent pseudo-header */
uh->check = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
up->len, fl6->flowi6_proto, csum);
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
send:
err = ip6_push_pending_frames(sk);
if (err) {
if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
err = 0;
}
} else
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_OUTDATAGRAMS, is_udplite);
out:
up->len = 0;
up->pending = 0;
return err;
}
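/*
 * sendmsg(): resolve the destination address (v4-mapped destinations
 * are handed to udp_sendmsg()), build the flow and look up the route,
 * append the payload to the cork queue and, unless MSG_MORE/UDP_CORK
 * keeps the socket corked, push the frames out immediately.
 */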
int udpv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct udp_sock *up = udp_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct in6_addr *daddr, *final_p, final;
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct flowi6 fl6;
struct dst_entry *dst;
int addr_len = msg->msg_namelen;
int ulen = len;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
int corkreq = up->corkflag || msg->msg_flags&MSG_MORE;
int err;
int connected = 0;
int is_udplite = IS_UDPLITE(sk);
int (*getfrag)(void *, char *, int, int, int, struct sk_buff *);
/* destination address check */
if (sin6) {
if (addr_len < offsetof(struct sockaddr, sa_data))
return -EINVAL;
switch (sin6->sin6_family) {
case AF_INET6:
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
break;
case AF_INET:
goto do_udp_sendmsg;
case AF_UNSPEC:
msg->msg_name = sin6 = NULL;
msg->msg_namelen = addr_len = 0;
daddr = NULL;
break;
default:
return -EINVAL;
}
} else if (!up->pending) {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
daddr = &np->daddr;
} else
daddr = NULL;
if (daddr) {
if (ipv6_addr_v4mapped(daddr)) {
struct sockaddr_in sin;
sin.sin_family = AF_INET;
sin.sin_port = sin6 ? sin6->sin6_port : inet->inet_dport;
sin.sin_addr.s_addr = daddr->s6_addr32[3];
msg->msg_name = &sin;
msg->msg_namelen = sizeof(sin);
do_udp_sendmsg:
if (__ipv6_only_sock(sk))
return -ENETUNREACH;
return udp_sendmsg(iocb, sk, msg, len);
}
}
if (up->pending == AF_INET)
return udp_sendmsg(iocb, sk, msg, len);
/* Rough check on arithmetic overflow,
better check is made in ip6_append_data().
*/
if (len > INT_MAX - sizeof(struct udphdr))
return -EMSGSIZE;
if (up->pending) {
/*
* There are pending frames.
* The socket lock must be held while it's corked.
*/
lock_sock(sk);
if (likely(up->pending)) {
if (unlikely(up->pending != AF_INET6)) {
release_sock(sk);
return -EAFNOSUPPORT;
}
dst = NULL;
goto do_append_data;
}
release_sock(sk);
}
ulen += sizeof(struct udphdr);
memset(&fl6, 0, sizeof(fl6));
if (sin6) {
if (sin6->sin6_port == 0)
return -EINVAL;
fl6.fl6_dport = sin6->sin6_port;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
/*
* Otherwise it will be difficult to maintain
* sk->sk_dst_cache.
*/
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &np->daddr))
daddr = &np->daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
__ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr)))
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
fl6.fl6_dport = inet->inet_dport;
daddr = &np->daddr;
fl6.flowlabel = np->flow_label;
connected = 1;
}
if (!fl6.flowi6_oif)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
fl6.flowi6_mark = sk->sk_mark;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(*opt);
err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
connected = 0;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = sk->sk_protocol;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = 0;
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) {
fl6.flowi6_oif = np->mcast_oif;
connected = 0;
} else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_sk_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
dst = NULL;
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
lock_sock(sk);
if (unlikely(up->pending)) {
		/* The socket was already corked by another thread while we
		 * were preparing it - an evident application bug. --ANK
		 */
release_sock(sk);
LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n");
err = -EINVAL;
goto out;
}
up->pending = AF_INET6;
do_append_data:
up->len += ulen;
getfrag = is_udplite ? udplite_getfrag : ip_generic_getfrag;
err = ip6_append_data(sk, getfrag, msg->msg_iov, ulen,
sizeof(struct udphdr), hlimit, tclass, opt, &fl6,
(struct rt6_info*)dst,
corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags, dontfrag);
if (err)
udp_v6_flush_pending_frames(sk);
else if (!corkreq)
err = udp_v6_push_pending_frames(sk);
else if (unlikely(skb_queue_empty(&sk->sk_write_queue)))
up->pending = 0;
if (dst) {
if (connected) {
ip6_dst_store(sk, dst,
ipv6_addr_equal(&fl6.daddr, &np->daddr) ?
&np->daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
ipv6_addr_equal(&fl6.saddr, &np->saddr) ?
&np->saddr :
#endif
NULL);
} else {
dst_release(dst);
}
dst = NULL;
}
if (err > 0)
err = np->recverr ? net_xmit_errno(err) : 0;
release_sock(sk);
out:
dst_release(dst);
fl6_sock_release(flowlabel);
if (!err)
return len;
/*
* ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting
* ENOBUFS might not be good (it's not tunable per se), but otherwise
* we don't have a good statistic (IpOutDiscards but it can be too many
* things). We could add another new stat but at least for now that
* seems like overkill.
*/
if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
UDP6_INC_STATS_USER(sock_net(sk),
UDP_MIB_SNDBUFERRORS, is_udplite);
}
return err;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags&MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto out;
}
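/*
 * Socket teardown: flush any corked frames, let an encapsulation user
 * run its destroy hook, then release the generic IPv6 socket state.
 */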
void udpv6_destroy_sock(struct sock *sk)
{
struct udp_sock *up = udp_sk(sk);
lock_sock(sk);
udp_v6_flush_pending_frames(sk);
release_sock(sk);
if (static_key_false(&udpv6_encap_needed) && up->encap_type) {
void (*encap_destroy)(struct sock *sk);
encap_destroy = ACCESS_ONCE(up->encap_destroy);
if (encap_destroy)
encap_destroy(sk);
}
inet6_destroy_sock(sk);
}
/*
* Socket option code for UDP
*/
int udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_setsockopt(sk, level, optname, optval, optlen,
udp_v6_push_pending_frames);
return compat_ipv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
int udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
if (level == SOL_UDP || level == SOL_UDPLITE)
return udp_lib_getsockopt(sk, level, optname, optval, optlen);
return compat_ipv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static const struct inet6_protocol udpv6_protocol = {
.handler = udpv6_rcv,
.err_handler = udpv6_err,
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
/* ------------------------------------------------------------------------ */
#ifdef CONFIG_PROC_FS
int udp6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN) {
seq_puts(seq, IPV6_SEQ_DGRAM_HEADER);
} else {
int bucket = ((struct udp_iter_state *)seq->private)->bucket;
struct inet_sock *inet = inet_sk(v);
__u16 srcp = ntohs(inet->inet_sport);
__u16 destp = ntohs(inet->inet_dport);
ip6_dgram_sock_seq_show(seq, v, srcp, destp, bucket);
}
return 0;
}
static const struct file_operations udp6_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = udp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct udp_seq_afinfo udp6_seq_afinfo = {
.name = "udp6",
.family = AF_INET6,
.udp_table = &udp_table,
.seq_fops = &udp6_afinfo_seq_fops,
.seq_ops = {
.show = udp6_seq_show,
},
};
int __net_init udp6_proc_init(struct net *net)
{
return udp_proc_register(net, &udp6_seq_afinfo);
}
void udp6_proc_exit(struct net *net)
{
	udp_proc_unregister(net, &udp6_seq_afinfo);
}
#endif /* CONFIG_PROC_FS */
void udp_v6_clear_sk(struct sock *sk, int size)
{
struct inet_sock *inet = inet_sk(sk);
/* we do not want to clear pinet6 field, because of RCU lookups */
sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
memset(&inet->pinet6 + 1, 0, size);
}
/* ------------------------------------------------------------------------ */
struct proto udpv6_prot = {
.name = "UDPv6",
.owner = THIS_MODULE,
.close = udp_lib_close,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = udp_ioctl,
.destroy = udpv6_destroy_sock,
.setsockopt = udpv6_setsockopt,
.getsockopt = udpv6_getsockopt,
.sendmsg = udpv6_sendmsg,
.recvmsg = udpv6_recvmsg,
.backlog_rcv = __udpv6_queue_rcv_skb,
.hash = udp_lib_hash,
.unhash = udp_lib_unhash,
.rehash = udp_v6_rehash,
.get_port = udp_v6_get_port,
.memory_allocated = &udp_memory_allocated,
.sysctl_mem = sysctl_udp_mem,
.sysctl_wmem = &sysctl_udp_wmem_min,
.sysctl_rmem = &sysctl_udp_rmem_min,
.obj_size = sizeof(struct udp6_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.h.udp_table = &udp_table,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_udpv6_setsockopt,
.compat_getsockopt = compat_udpv6_getsockopt,
#endif
.clear_sk = udp_v6_clear_sk,
};
static struct inet_protosw udpv6_protosw = {
.type = SOCK_DGRAM,
.protocol = IPPROTO_UDP,
.prot = &udpv6_prot,
.ops = &inet6_dgram_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_PERMANENT,
};
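/*
 * Register the UDPv6 protocol handler and the SOCK_DGRAM interface;
 * the protocol registration is rolled back if the protosw registration
 * fails.
 */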
int __init udpv6_init(void)
{
int ret;
ret = inet6_add_protocol(&udpv6_protocol, IPPROTO_UDP);
if (ret)
goto out;
ret = inet6_register_protosw(&udpv6_protosw);
if (ret)
goto out_udpv6_protocol;
out:
return ret;
out_udpv6_protocol:
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
goto out;
}
void udpv6_exit(void)
{
inet6_unregister_protosw(&udpv6_protosw);
inet6_del_protocol(&udpv6_protocol, IPPROTO_UDP);
}