idx int64 | func_before string | Vulnerability Classification string | vul int64 | func_after string | patch string | CWE ID string | lines_before string | lines_after string |
|---|---|---|---|---|---|---|---|---|
16,700 | static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
vcpu->mmio_phys_addr, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
return 0;
}
| DoS | 0 | static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
{
if (vcpu->mmio_read_completed) {
memcpy(val, vcpu->mmio_data, bytes);
trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
vcpu->mmio_phys_addr, *(u64 *)val);
vcpu->mmio_read_completed = 0;
return 1;
}
return 0;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
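The patch attached to these records closes the CWE-399 DoS: userspace could create a vCPU first and then `KVM_CREATE_IRQCHIP`, leaving that vCPU without an in-kernel LAPIC while `irqchip_in_kernel()` reported true. Below is a minimal userspace sketch (assumes a Linux host exposing `/dev/kvm`; error handling mostly elided) of the ordering the patched ioctl now rejects with `-EINVAL`:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	if (kvm < 0)
		return 1;
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* Create a vCPU first ... */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

	/* ... then try to create the in-kernel irqchip. With the patch
	 * applied this fails (errno == EINVAL) because online_vcpus != 0. */
	if (ioctl(vm, KVM_CREATE_IRQCHIP, 0) < 0)
		perror("KVM_CREATE_IRQCHIP after vCPU creation");

	(void)vcpu;
	return 0;
}
```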
16,701 | static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
unsigned long cr2, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
/*
 * If the emulation is caused by #PF and the instruction is not a
 * page-table-writing one, the VM-EXIT was triggered by a write-protected
 * shadow page; we can zap the shadow page and retry the instruction
 * directly.
 *
 * Note: if the guest uses a non-page-table-modifying instruction on the
 * PDE that maps the instruction itself, we will unmap the instruction
 * and enter an infinite loop. So we cache the last retried eip and the
 * last fault address; if we see the same eip and address again, we can
 * break out of the potential infinite loop.
 */
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_RETRY))
return false;
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
return true;
}
| DoS | 0 | static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
unsigned long cr2, int emulation_type)
{
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
last_retry_eip = vcpu->arch.last_retry_eip;
last_retry_addr = vcpu->arch.last_retry_addr;
/*
 * If the emulation is caused by #PF and the instruction is not a
 * page-table-writing one, the VM-EXIT was triggered by a write-protected
 * shadow page; we can zap the shadow page and retry the instruction
 * directly.
 *
 * Note: if the guest uses a non-page-table-modifying instruction on the
 * PDE that maps the instruction itself, we will unmap the instruction
 * and enter an infinite loop. So we cache the last retried eip and the
 * last fault address; if we see the same eip and address again, we can
 * break out of the potential infinite loop.
 */
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_RETRY))
return false;
if (x86_page_table_writing_insn(ctxt))
return false;
if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
return false;
vcpu->arch.last_retry_eip = ctxt->eip;
vcpu->arch.last_retry_addr = cr2;
if (!vcpu->arch.mmu.direct_map)
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
return true;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
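For illustration, here is a self-contained sketch (hypothetical `retry_state` type, not the kernel's) of the dedup logic `retry_instruction()` implements: retry at most once per (eip, fault address) pair, so a retry that faults at the same spot again falls back to ordinary emulation instead of looping forever.

```c
#include <stdbool.h>
#include <stdio.h>

struct retry_state {
	unsigned long last_eip;
	unsigned long last_addr;
};

static bool should_retry(struct retry_state *s, unsigned long eip,
			 unsigned long addr)
{
	unsigned long prev_eip = s->last_eip, prev_addr = s->last_addr;

	/* Clear first: a successful retry must not suppress a later one. */
	s->last_eip = s->last_addr = 0;
	if (eip == prev_eip && addr == prev_addr)
		return false;		/* same fault twice: give up */
	s->last_eip = eip;
	s->last_addr = addr;
	return true;
}

int main(void)
{
	struct retry_state s = { 0, 0 };
	printf("%d\n", should_retry(&s, 0x1000, 0x2000));	/* 1: retry  */
	printf("%d\n", should_retry(&s, 0x1000, 0x2000));	/* 0: looped */
	return 0;
}
```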
16,702 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
| DoS | 0 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,703 | static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if the guest os id is not set, the hypercall page should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
}
default:
pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
| DoS | 0 | static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
struct kvm *kvm = vcpu->kvm;
switch (msr) {
case HV_X64_MSR_GUEST_OS_ID:
kvm->arch.hv_guest_os_id = data;
/* setting guest os id to zero disables hypercall page */
if (!kvm->arch.hv_guest_os_id)
kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
break;
case HV_X64_MSR_HYPERCALL: {
u64 gfn;
unsigned long addr;
u8 instructions[4];
/* if the guest os id is not set, the hypercall page should remain disabled */
if (!kvm->arch.hv_guest_os_id)
break;
if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
kvm->arch.hv_hypercall = data;
break;
}
gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
addr = gfn_to_hva(kvm, gfn);
if (kvm_is_error_hva(addr))
return 1;
kvm_x86_ops->patch_hypercall(vcpu, instructions);
((unsigned char *)instructions)[3] = 0xc3; /* ret */
if (__copy_to_user((void __user *)addr, instructions, 4))
return 1;
kvm->arch.hv_hypercall = data;
break;
}
default:
pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
"data 0x%llx\n", msr, data);
return 1;
}
return 0;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,704 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
| DoS | 0 | static int set_msr_mtrr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
u64 *p = (u64 *)&vcpu->arch.mtrr_state.fixed_ranges;
if (!mtrr_valid(vcpu, msr, data))
return 1;
if (msr == MSR_MTRRdefType) {
vcpu->arch.mtrr_state.def_type = data;
vcpu->arch.mtrr_state.enabled = (data & 0xc00) >> 10;
} else if (msr == MSR_MTRRfix64K_00000)
p[0] = data;
else if (msr == MSR_MTRRfix16K_80000 || msr == MSR_MTRRfix16K_A0000)
p[1 + msr - MSR_MTRRfix16K_80000] = data;
else if (msr >= MSR_MTRRfix4K_C0000 && msr <= MSR_MTRRfix4K_F8000)
p[3 + msr - MSR_MTRRfix4K_C0000] = data;
else if (msr == MSR_IA32_CR_PAT)
vcpu->arch.pat = data;
else { /* Variable MTRRs */
int idx, is_mtrr_mask;
u64 *pt;
idx = (msr - 0x200) / 2;
is_mtrr_mask = msr - 0x200 - 2 * idx;
if (!is_mtrr_mask)
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].base_lo;
else
pt =
(u64 *)&vcpu->arch.mtrr_state.var_ranges[idx].mask_lo;
*pt = data;
}
kvm_mmu_reset_context(vcpu);
return 0;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
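The variable-MTRR branch above maps MSR numbers onto (base, mask) pairs; under the standard x86 layout the pairs start at MSR 0x200 (0x200/0x201 are PhysBase0/PhysMask0, 0x202/0x203 PhysBase1/PhysMask1, and so on). A worked example of the index math:

```c
#include <stdio.h>

int main(void)
{
	unsigned msr = 0x20b;			/* MTRRphysMask5 */
	int idx = (msr - 0x200) / 2;		/* pair index: 5 */
	int is_mtrr_mask = msr - 0x200 - 2 * idx; /* 1 => mask, 0 => base */

	printf("idx=%d is_mask=%d\n", idx, is_mtrr_mask); /* idx=5 is_mask=1 */
	return 0;
}
```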
16,705 | static void shared_msr_update(unsigned slot, u32 msr)
{
struct kvm_shared_msrs *smsr;
u64 value;
smsr = &__get_cpu_var(shared_msrs);
/* this is only a read, and nobody should be modifying it at this
 * time, so no lock is needed */
if (slot >= shared_msrs_global.nr) {
printk(KERN_ERR "kvm: invalid MSR slot!");
return;
}
rdmsrl_safe(msr, &value);
smsr->values[slot].host = value;
smsr->values[slot].curr = value;
}
| DoS | 0 | static void shared_msr_update(unsigned slot, u32 msr)
{
struct kvm_shared_msrs *smsr;
u64 value;
smsr = &__get_cpu_var(shared_msrs);
/* this is only a read, and nobody should be modifying it at this
 * time, so no lock is needed */
if (slot >= shared_msrs_global.nr) {
printk(KERN_ERR "kvm: invalid MSR slot!");
return;
}
rdmsrl_safe(msr, &value);
smsr->values[slot].host = value;
smsr->values[slot].curr = value;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,706 | static void tsc_khz_changed(void *data)
{
struct cpufreq_freqs *freq = data;
unsigned long khz = 0;
if (data)
khz = freq->new;
else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
khz = cpufreq_quick_get(raw_smp_processor_id());
if (!khz)
khz = tsc_khz;
__this_cpu_write(cpu_tsc_khz, khz);
}
| DoS | 0 | static void tsc_khz_changed(void *data)
{
struct cpufreq_freqs *freq = data;
unsigned long khz = 0;
if (data)
khz = freq->new;
else if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
khz = cpufreq_quick_get(raw_smp_processor_id());
if (!khz)
khz = tsc_khz;
__this_cpu_write(cpu_tsc_khz, khz);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,707 | static bool valid_pat_type(unsigned t)
{
return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}
| DoS | 0 | static bool valid_pat_type(unsigned t)
{
return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
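The `(1 << t) & 0xf3` trick in `valid_pat_type()` encodes the allowed set as a bitmask: 0xf3 is 0b11110011, so exactly types 0, 1, 4, 5, 6 and 7 pass (2 and 3 are architecturally reserved PAT encodings). A tiny standalone demo:

```c
#include <stdio.h>

static int valid_pat_type(unsigned t)
{
	/* short-circuits on t < 8, so the shift is always well-defined */
	return t < 8 && (1 << t) & 0xf3;
}

int main(void)
{
	for (unsigned t = 0; t < 9; t++)
		printf("t=%u valid=%d\n", t, valid_pat_type(t) ? 1 : 0);
	return 0;	/* prints valid=0 only for t = 2, 3 and 8 */
}
```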
16,708 | static void vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
vcpu->arch.apic->vapic_page = page;
}
| DoS | 0 | static void vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
vcpu->arch.apic->vapic_page = page;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,709 | static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
| DoS | 0 | static void vapic_exit(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
int idx;
if (!apic || !apic->vapic_addr)
return;
idx = srcu_read_lock(&vcpu->kvm->srcu);
kvm_release_page_dirty(apic->vapic_page);
mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,710 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = 0;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
goto out;
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_x86_ops->tlb_flush(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
vcpu->fpu_active = 0;
kvm_x86_ops->fpu_deactivate(vcpu);
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
vcpu->arch.apf.halted = true;
r = 1;
goto out;
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
req_immediate_exit =
kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
}
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto out;
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
/* enable NMI/IRQ window open exits if needed */
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
}
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
vcpu->mode = IN_GUEST_MODE;
/* We should set ->mode before checking ->requests,
 * see the comment in make_all_cpus_request.
 */
smp_mb();
local_irq_disable();
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
kvm_x86_ops->cancel_injection(vcpu);
r = 1;
goto out;
}
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
}
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu);
/*
* If the guest has used debug registers, at least dr7
* will be disabled while returning to the host.
* If we don't have active breakpoints in the host, we don't
* care about the messed up debug address registers. But if
* we have some of them active, restore the old state.
*/
if (hw_breakpoint_active())
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
++vcpu->stat.exits;
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
out:
return r;
}
| DoS | 0 | static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
{
int r;
bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
vcpu->run->request_interrupt_window;
bool req_immediate_exit = 0;
if (vcpu->requests) {
if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
kvm_mmu_unload(vcpu);
if (kvm_check_request(KVM_REQ_MIGRATE_TIMER, vcpu))
__kvm_migrate_timers(vcpu);
if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu)) {
r = kvm_guest_time_update(vcpu);
if (unlikely(r))
goto out;
}
if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
kvm_mmu_sync_roots(vcpu);
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
kvm_x86_ops->tlb_flush(vcpu);
if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_TRIPLE_FAULT, vcpu)) {
vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
r = 0;
goto out;
}
if (kvm_check_request(KVM_REQ_DEACTIVATE_FPU, vcpu)) {
vcpu->fpu_active = 0;
kvm_x86_ops->fpu_deactivate(vcpu);
}
if (kvm_check_request(KVM_REQ_APF_HALT, vcpu)) {
/* Page is swapped out. Do synthetic halt */
vcpu->arch.apf.halted = true;
r = 1;
goto out;
}
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
record_steal_time(vcpu);
if (kvm_check_request(KVM_REQ_NMI, vcpu))
process_nmi(vcpu);
req_immediate_exit =
kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
if (kvm_check_request(KVM_REQ_PMU, vcpu))
kvm_handle_pmu_event(vcpu);
if (kvm_check_request(KVM_REQ_PMI, vcpu))
kvm_deliver_pmi(vcpu);
}
r = kvm_mmu_reload(vcpu);
if (unlikely(r))
goto out;
if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
inject_pending_event(vcpu);
/* enable NMI/IRQ window open exits if needed */
if (vcpu->arch.nmi_pending)
kvm_x86_ops->enable_nmi_window(vcpu);
else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
kvm_x86_ops->enable_irq_window(vcpu);
if (kvm_lapic_enabled(vcpu)) {
update_cr8_intercept(vcpu);
kvm_lapic_sync_to_vapic(vcpu);
}
}
preempt_disable();
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
kvm_load_guest_xcr0(vcpu);
vcpu->mode = IN_GUEST_MODE;
/* We should set ->mode before checking ->requests,
 * see the comment in make_all_cpus_request.
 */
smp_mb();
local_irq_disable();
if (vcpu->mode == EXITING_GUEST_MODE || vcpu->requests
|| need_resched() || signal_pending(current)) {
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
preempt_enable();
kvm_x86_ops->cancel_injection(vcpu);
r = 1;
goto out;
}
srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);
kvm_guest_enter();
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
set_debugreg(vcpu->arch.eff_db[1], 1);
set_debugreg(vcpu->arch.eff_db[2], 2);
set_debugreg(vcpu->arch.eff_db[3], 3);
}
trace_kvm_entry(vcpu->vcpu_id);
kvm_x86_ops->run(vcpu);
/*
* If the guest has used debug registers, at least dr7
* will be disabled while returning to the host.
* If we don't have active breakpoints in the host, we don't
* care about the messed up debug address registers. But if
* we have some of them active, restore the old state.
*/
if (hw_breakpoint_active())
hw_breakpoint_restore();
vcpu->arch.last_guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();
local_irq_enable();
++vcpu->stat.exits;
/*
* We must have an instruction between local_irq_enable() and
* kvm_guest_exit(), so the timer interrupt isn't delayed by
* the interrupt shadow. The stat.exits increment will do nicely.
* But we need to prevent reordering, hence this barrier():
*/
barrier();
kvm_guest_exit();
preempt_enable();
vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
/*
* Profile KVM exit RIPs:
*/
if (unlikely(prof_on == KVM_PROFILING)) {
unsigned long rip = kvm_rip_read(vcpu);
profile_hit(KVM_PROFILING, (void *)rip);
}
kvm_lapic_sync_from_vapic(vcpu);
r = kvm_x86_ops->handle_exit(vcpu);
out:
return r;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
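The `->mode`/`->requests` dance around `IN_GUEST_MODE` above is a store-buffering handshake: each side stores its flag, issues a full fence (`smp_mb()`), then reads the other side's flag, so at least one side observes the other and guest entry is cancelled rather than a request being missed. A hedged plain-C11 analogue (simplified, not the kernel's code):

```c
#include <stdatomic.h>
#include <stdbool.h>

atomic_int mode;      /* 0 = OUTSIDE_GUEST_MODE, 1 = IN_GUEST_MODE */
atomic_int requests;  /* pending-request flag set by other CPUs */

bool try_enter_guest(void)
{
	atomic_store_explicit(&mode, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() analogue */
	if (atomic_load_explicit(&requests, memory_order_relaxed)) {
		atomic_store_explicit(&mode, 0, memory_order_relaxed);
		return false;	/* bail out; a request raced with entry */
	}
	return true;		/* safe to enter the guest */
}

bool post_request(void)	/* roughly what kvm_make_request + kick does */
{
	atomic_store_explicit(&requests, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with entry fence */
	/* if the vCPU already published IN_GUEST_MODE, it must be kicked */
	return atomic_load_explicit(&mode, memory_order_relaxed) == 1;
}
```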
16,711 | static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac)
{
if (tac->flags)
return -EINVAL;
vcpu->arch.tpr_access_reporting = !!tac->enabled;
return 0;
}
| DoS | 0 | static int vcpu_ioctl_tpr_access_reporting(struct kvm_vcpu *vcpu,
struct kvm_tpr_access_ctl *tac)
{
if (tac->flags)
return -EINVAL;
vcpu->arch.tpr_access_reporting = !!tac->enabled;
return 0;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,712 | static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
| DoS | 0 | static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
{
int handled = 0;
int n;
do {
n = min(len, 8);
if (!(vcpu->arch.apic &&
!kvm_iodevice_read(&vcpu->arch.apic->dev, addr, n, v))
&& kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, n, v))
break;
trace_kvm_mmio(KVM_TRACE_MMIO_READ, n, addr, *(u64 *)v);
handled += n;
addr += n;
len -= n;
v += n;
} while (len);
return handled;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
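A sketch of the chunking pattern in `vcpu_mmio_read()` may help: service a request in at-most-8-byte pieces, stop at the first piece no device claims, and report how many bytes were actually handled (the hypothetical `handle_chunk()` below stands in for the APIC/bus dispatch):

```c
#include <stdio.h>

static int handle_chunk(unsigned long addr, int n)
{
	return addr + n <= 0x1010;	/* pretend only [0, 0x1010) is backed */
}

static int mmio_read(unsigned long addr, int len)
{
	int handled = 0, n;

	do {
		n = len < 8 ? len : 8;	/* min(len, 8) */
		if (!handle_chunk(addr, n))
			break;		/* unclaimed: stop, report partial */
		handled += n;
		addr += n;
		len -= n;
	} while (len);
	return handled;
}

int main(void)
{
	printf("%d\n", mmio_read(0x1000, 32)); /* 16: stops at 0x1010 */
	return 0;
}
```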
16,713 | u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.virtual_tsc_khz)
return vcpu->arch.virtual_tsc_khz;
else
return __this_cpu_read(cpu_tsc_khz);
}
| DoS | 0 | u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
{
if (vcpu->arch.virtual_tsc_khz)
return vcpu->arch.virtual_tsc_khz;
else
return __this_cpu_read(cpu_tsc_khz);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,714 | static void wbinvd_ipi(void *garbage)
{
wbinvd();
}
| DoS | 0 | static void wbinvd_ipi(void *garbage)
{
wbinvd();
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,715 | static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return emulator_write_phys(vcpu, gpa, val, bytes);
}
| DoS | 0 | static int write_emulate(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
return emulator_write_phys(vcpu, gpa, val, bytes);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,716 | static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
memcpy(vcpu->mmio_data, val, bytes);
memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
return X86EMUL_CONTINUE;
}
| DoS | 0 | static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
void *val, int bytes)
{
memcpy(vcpu->mmio_data, val, bytes);
memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8);
return X86EMUL_CONTINUE;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,717 | static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
return vcpu_mmio_write(vcpu, gpa, bytes, val);
}
| DoS | 0 | static int write_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes, void *val)
{
trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, bytes, gpa, *(u64 *)val);
return vcpu_mmio_write(vcpu, gpa, bytes, val);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,718 | static void write_protect_slot(struct kvm *kvm,
struct kvm_memory_slot *memslot,
unsigned long *dirty_bitmap,
unsigned long nr_dirty_pages)
{
spin_lock(&kvm->mmu_lock);
/* Not many dirty pages compared to # of shadow pages. */
if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
unsigned long gfn_offset;
for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
unsigned long gfn = memslot->base_gfn + gfn_offset;
kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
}
kvm_flush_remote_tlbs(kvm);
} else
kvm_mmu_slot_remove_write_access(kvm, memslot->id);
spin_unlock(&kvm->mmu_lock);
}
| DoS | 0 | static void write_protect_slot(struct kvm *kvm,
struct kvm_memory_slot *memslot,
unsigned long *dirty_bitmap,
unsigned long nr_dirty_pages)
{
spin_lock(&kvm->mmu_lock);
/* Not many dirty pages compared to # of shadow pages. */
if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
unsigned long gfn_offset;
for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
unsigned long gfn = memslot->base_gfn + gfn_offset;
kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
}
kvm_flush_remote_tlbs(kvm);
} else
kvm_mmu_slot_remove_write_access(kvm, memslot->id);
spin_unlock(&kvm->mmu_lock);
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,719 | int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for the vmware backdoor interface to work since it
changes register values during the IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else
writeback = false;
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
| DoS | 0 | int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for the vmware backdoor interface to work since it
changes register values during the IO operation */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else
writeback = false;
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
16,720 | static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
u64 page_addr = data & PAGE_MASK;
u8 *page;
int r;
r = -E2BIG;
if (page_num >= blob_size)
goto out;
r = -ENOMEM;
page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
if (IS_ERR(page)) {
r = PTR_ERR(page);
goto out;
}
if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
goto out_free;
r = 0;
out_free:
kfree(page);
out:
return r;
}
| DoS | 0 | static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
{
struct kvm *kvm = vcpu->kvm;
int lm = is_long_mode(vcpu);
u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
: (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
: kvm->arch.xen_hvm_config.blob_size_32;
u32 page_num = data & ~PAGE_MASK;
u64 page_addr = data & PAGE_MASK;
u8 *page;
int r;
r = -E2BIG;
if (page_num >= blob_size)
goto out;
r = -ENOMEM;
page = memdup_user(blob_addr + (page_num * PAGE_SIZE), PAGE_SIZE);
if (IS_ERR(page)) {
r = PTR_ERR(page);
goto out;
}
if (kvm_write_guest(kvm, page_addr, page, PAGE_SIZE))
goto out_free;
r = 0;
out_free:
kfree(page);
out:
return r;
}
| @@ -3131,6 +3131,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
+ r = -EINVAL;
+ if (atomic_read(&kvm->online_vcpus))
+ goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
@@ -5956,6 +5959,11 @@ void kvm_arch_check_processor_compat(void *rtn)
kvm_x86_ops->check_processor_compatibility(rtn);
}
+bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu)
+{
+ return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL);
+}
+
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
struct page *page; | CWE-399 | null | null |
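Worked example of how `xen_hvm_config()` splits the wrmsr value: the low 12 bits (`~PAGE_MASK`) select which page of the blob to copy, and the page-aligned bits (`PAGE_MASK`) give the guest-physical destination. The payload below is an arbitrary illustrative value:

```c
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t data = 0x00000000feed5003ULL;	/* example wrmsr payload */
	uint32_t page_num  = data & ~PAGE_MASK;	/* 0x003: 4th blob page  */
	uint64_t page_addr = data & PAGE_MASK;	/* 0xfeed5000: guest gpa */

	printf("page_num=%u page_addr=0x%llx\n",
	       (unsigned)page_num, (unsigned long long)page_addr);
	return 0;
}
```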
16,721 | int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
#ifndef CONFIG_S390
if (npages && !new.rmap) {
new.rmap = vzalloc(npages * sizeof(*new.rmap));
if (!new.rmap)
goto out_free;
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
unsigned long ugfn;
unsigned long j;
int lpages;
int level = i + 2;
/* Avoid unused variable warning if no large pages */
(void)level;
if (new.lpage_info[i])
continue;
lpages = 1 + ((base_gfn + npages - 1)
>> KVM_HPAGE_GFN_SHIFT(level));
lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
if (!new.lpage_info[i])
goto out_free;
if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][0].write_count = 1;
if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][lpages - 1].write_count = 1;
ugfn = new.userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!largepages_enabled)
for (j = 0; j < lpages; ++j)
new.lpage_info[i][j].write_count = 1;
}
skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
if (user_alloc)
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow(kvm);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.rmap = NULL;
new.dirty_bitmap = NULL;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
new.lpage_info[i] = NULL;
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
| DoS | 0 | int __kvm_set_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
int user_alloc)
{
int r;
gfn_t base_gfn;
unsigned long npages;
unsigned long i;
struct kvm_memory_slot *memslot;
struct kvm_memory_slot old, new;
struct kvm_memslots *slots, *old_memslots;
r = -EINVAL;
/* General sanity checks */
if (mem->memory_size & (PAGE_SIZE - 1))
goto out;
if (mem->guest_phys_addr & (PAGE_SIZE - 1))
goto out;
/* We can read the guest memory with __xxx_user() later on. */
if (user_alloc &&
((mem->userspace_addr & (PAGE_SIZE - 1)) ||
!access_ok(VERIFY_WRITE,
(void __user *)(unsigned long)mem->userspace_addr,
mem->memory_size)))
goto out;
if (mem->slot >= KVM_MEM_SLOTS_NUM)
goto out;
if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
goto out;
memslot = id_to_memslot(kvm->memslots, mem->slot);
base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
npages = mem->memory_size >> PAGE_SHIFT;
r = -EINVAL;
if (npages > KVM_MEM_MAX_NR_PAGES)
goto out;
if (!npages)
mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
new = old = *memslot;
new.id = mem->slot;
new.base_gfn = base_gfn;
new.npages = npages;
new.flags = mem->flags;
/* Disallow changing a memory slot's size. */
r = -EINVAL;
if (npages && old.npages && npages != old.npages)
goto out_free;
/* Check for overlaps */
r = -EEXIST;
for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
struct kvm_memory_slot *s = &kvm->memslots->memslots[i];
if (s == memslot || !s->npages)
continue;
if (!((base_gfn + npages <= s->base_gfn) ||
(base_gfn >= s->base_gfn + s->npages)))
goto out_free;
}
/* Free page dirty bitmap if unneeded */
if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
new.dirty_bitmap = NULL;
r = -ENOMEM;
/* Allocate if a slot is being created */
#ifndef CONFIG_S390
if (npages && !new.rmap) {
new.rmap = vzalloc(npages * sizeof(*new.rmap));
if (!new.rmap)
goto out_free;
new.user_alloc = user_alloc;
new.userspace_addr = mem->userspace_addr;
}
if (!npages)
goto skip_lpage;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i) {
unsigned long ugfn;
unsigned long j;
int lpages;
int level = i + 2;
/* Avoid unused variable warning if no large pages */
(void)level;
if (new.lpage_info[i])
continue;
lpages = 1 + ((base_gfn + npages - 1)
>> KVM_HPAGE_GFN_SHIFT(level));
lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
new.lpage_info[i] = vzalloc(lpages * sizeof(*new.lpage_info[i]));
if (!new.lpage_info[i])
goto out_free;
if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][0].write_count = 1;
if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
new.lpage_info[i][lpages - 1].write_count = 1;
ugfn = new.userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
* other, or if explicitly asked to, disable large page
* support for this slot
*/
if ((base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
!largepages_enabled)
for (j = 0; j < lpages; ++j)
new.lpage_info[i][j].write_count = 1;
}
skip_lpage:
/* Allocate page dirty bitmap if needed */
if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
if (kvm_create_dirty_bitmap(&new) < 0)
goto out_free;
/* destroy any largepage mappings for dirty tracking */
}
#else /* not defined CONFIG_S390 */
new.user_alloc = user_alloc;
if (user_alloc)
new.userspace_addr = mem->userspace_addr;
#endif /* not defined CONFIG_S390 */
if (!npages) {
struct kvm_memory_slot *slot;
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
slot = id_to_memslot(slots, mem->slot);
slot->flags |= KVM_MEMSLOT_INVALID;
update_memslots(slots, NULL);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
/* From this point no new shadow pages pointing to a deleted
* memslot will be created.
*
* validation of sp->gfn happens in:
* - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
* - kvm_is_visible_gfn (mmu_check_roots)
*/
kvm_arch_flush_shadow(kvm);
kfree(old_memslots);
}
r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
if (r)
goto out_free;
/* map/unmap the pages in iommu page table */
if (npages) {
r = kvm_iommu_map_pages(kvm, &new);
if (r)
goto out_free;
} else
kvm_iommu_unmap_pages(kvm, &old);
r = -ENOMEM;
slots = kmemdup(kvm->memslots, sizeof(struct kvm_memslots),
GFP_KERNEL);
if (!slots)
goto out_free;
/* actual memory is freed via old in kvm_free_physmem_slot below */
if (!npages) {
new.rmap = NULL;
new.dirty_bitmap = NULL;
for (i = 0; i < KVM_NR_PAGE_SIZES - 1; ++i)
new.lpage_info[i] = NULL;
}
update_memslots(slots, &new);
old_memslots = kvm->memslots;
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
/*
* If the new memory slot is created, we need to clear all
* mmio sptes.
*/
if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT)
kvm_arch_flush_shadow(kvm);
kvm_free_physmem_slot(&old, &new);
kfree(old_memslots);
return 0;
out_free:
kvm_free_physmem_slot(&new, &old);
out:
return r;
}
| @@ -1720,6 +1720,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
+ if (!kvm_vcpu_compatible(vcpu)) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
+ }
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy; | CWE-399 | null | null |
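The memslot loop above uses the standard half-open interval overlap test: `[a, a+alen)` and `[b, b+blen)` overlap unless one ends at or before the start of the other. A short sketch:

```c
#include <stdbool.h>
#include <stdio.h>

static bool overlaps(unsigned long a, unsigned long alen,
		     unsigned long b, unsigned long blen)
{
	return !(a + alen <= b || a >= b + blen);
}

int main(void)
{
	printf("%d\n", overlaps(0, 16, 16, 16));  /* 0: adjacent, no overlap */
	printf("%d\n", overlaps(0, 17, 16, 16));  /* 1: one-gfn overlap      */
	printf("%d\n", overlaps(20, 4, 16, 16));  /* 1: fully contained      */
	return 0;
}
```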
16,722 | static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int young, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
if (young)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return young;
}
| DoS | 0 | static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int young, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
young = kvm_age_hva(kvm, address);
if (young)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
return young;
}
| @@ -1720,6 +1720,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
+ if (!kvm_vcpu_compatible(vcpu)) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
+ }
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy; | CWE-399 | null | null |
16,723 | static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush, idx;
/*
* When ->invalidate_page runs, the linux pte has been zapped
* already but the page is still allocated until
* ->invalidate_page returns. So if we increase the sequence
* here the kvm page fault will notice if the spte can't be
* established because the page is going to be freed. If
* instead the kvm page fault establishes the spte before
* ->invalidate_page runs, kvm_unmap_hva will release it
* before returning.
*
* The sequence increase only needs to be seen at spin_unlock
* time, and not at spin_lock time.
*
* Increasing the sequence after the spin_unlock would be
* unsafe because the kvm page fault could then establish the
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| DoS | 0 | static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush, idx;
/*
* When ->invalidate_page runs, the linux pte has been zapped
* already but the page is still allocated until
* ->invalidate_page returns. So if we increase the sequence
* here the kvm page fault will notice if the spte can't be
* established because the page is going to be freed. If
* instead the kvm page fault establishes the spte before
* ->invalidate_page runs, kvm_unmap_hva will release it
* before returning.
*
* The sequence increase only needs to be seen at spin_unlock
* time, and not at spin_lock time.
*
* Increasing the sequence after the spin_unlock would be
* unsafe because the kvm page fault could then establish the
* pte after kvm_unmap_hva returned, without noticing the page
* is going to be freed.
*/
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
kvm->mmu_notifier_seq++;
need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
/* we have to flush the TLB before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| @@ -1720,6 +1720,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
+ if (!kvm_vcpu_compatible(vcpu)) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
+ }
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy; | CWE-399 | null | null |
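A hedged sketch of the sequence-counter handshake the comment above describes (heavily simplified: one implicit lock, no SRCU, hypothetical `mmu_sync` type): the invalidator bumps `seq` under the lock before tearing the page down; the fault path samples `seq` before its slow work and re-checks it under the lock, retrying if an invalidation slipped in between.

```c
#include <stdbool.h>

struct mmu_sync {
	unsigned long seq;
	/* lock elided; assume all seq accesses happen under it */
};

static void invalidate_page(struct mmu_sync *s)
{
	/* under mmu_lock */
	s->seq++;		/* make concurrent faults notice us */
	/* ... zap sptes, flush TLBs ... */
}

static bool fault_install_spte(struct mmu_sync *s, unsigned long snapshot)
{
	/* under mmu_lock, after the slow page lookup done outside it */
	if (s->seq != snapshot)
		return false;	/* page may be going away: retry fault */
	/* ... install the spte ... */
	return true;
}

int main(void)
{
	struct mmu_sync s = { 0 };
	unsigned long snap = s.seq;	/* fault path samples seq */
	invalidate_page(&s);		/* racing invalidation bumps it */
	return fault_install_spte(&s, snap) ? 1 : 0; /* 0: must retry */
}
```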
16,724 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| DoS | 0 | static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
/*
* The count increase must become visible at unlock time as no
* spte can be established without taking the mmu_lock and
* count is also read inside the mmu_lock critical section.
*/
kvm->mmu_notifier_count++;
for (; start < end; start += PAGE_SIZE)
need_tlb_flush |= kvm_unmap_hva(kvm, start);
need_tlb_flush |= kvm->tlbs_dirty;
/* we've to flush the tlb before the pages can be freed */
if (need_tlb_flush)
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
}
| @@ -1720,6 +1720,10 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
goto vcpu_destroy;
mutex_lock(&kvm->lock);
+ if (!kvm_vcpu_compatible(vcpu)) {
+ r = -EINVAL;
+ goto unlock_vcpu_destroy;
+ }
if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
r = -EINVAL;
goto unlock_vcpu_destroy; | CWE-399 | null | null |
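Record 16,724 shows the count half of the same protocol: range invalidation raises mmu_notifier_count before zapping, and a paired range_end (outside this record) presumably drops it again. While the count is nonzero, the fault path must retry rather than map, independent of the sequence check sketched above. A sketch of the combined test, assuming that paired decrement:

    static unsigned long notifier_seq;      /* as in the previous sketch          */
    static unsigned long notifier_count;    /* ++ at range_start, -- at range_end */

    /* Nonzero when the fault path must retry rather than map the page. */
    static int mapping_must_retry(unsigned long sampled_seq)
    {
        if (notifier_count)                 /* a range teardown is in flight */
            return 1;
        if (notifier_seq != sampled_seq)    /* a zap completed meanwhile     */
            return 1;
        return 0;
    }

    int main(void)
    {
        unsigned long seq = notifier_seq;
        notifier_count++;                   /* invalidate_range_start above  */
        return mapping_must_retry(seq);     /* exits 1: caller would retry   */
    }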
16,725 | __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
write_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
if (x6spi) {
atomic_inc(&x6spi->refcnt);
spi = x6spi->spi;
} else
spi = __xfrm6_tunnel_alloc_spi(saddr);
write_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
| DoS | 0 | __be32 xfrm6_tunnel_alloc_spi(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
write_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
if (x6spi) {
atomic_inc(&x6spi->refcnt);
spi = x6spi->spi;
} else
spi = __xfrm6_tunnel_alloc_spi(saddr);
write_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
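The patch carried by these xfrm6_tunnel records rewrites the receive hook as `xfrm6_rcv_spi(skb, spi) > 0 ? : 0`, using the GNU two-operand conditional: `a ? : b` yields the already-evaluated condition when it is nonzero. The effect is to collapse any zero or negative (error) return into 0 so it cannot propagate upward. A standalone sketch of the idiom; this is a GNU C extension, so it compiles with gcc/clang but not strict ISO C:

    #include <stdio.h>

    static int clamp_rcv(int rv)
    {
        return rv > 0 ? : 0;   /* 1 when rv > 0, else 0 */
    }

    int main(void)
    {
        printf("%d %d %d\n", clamp_rcv(-22), clamp_rcv(0), clamp_rcv(5));
        return 0;              /* prints: 0 0 1 */
    }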
16,726 | static void __exit xfrm6_tunnel_fini(void)
{
xfrm6_tunnel_spi_fini();
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}
| DoS | 0 | static void __exit xfrm6_tunnel_fini(void)
{
xfrm6_tunnel_spi_fini();
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,727 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos, *n;
write_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, pos, n,
&xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
if (atomic_dec_and_test(&x6spi->refcnt)) {
hlist_del(&x6spi->list_byaddr);
hlist_del(&x6spi->list_byspi);
kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
break;
}
}
}
write_unlock_bh(&xfrm6_tunnel_spi_lock);
}
| DoS | 0 | void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
struct hlist_node *pos, *n;
write_lock_bh(&xfrm6_tunnel_spi_lock);
hlist_for_each_entry_safe(x6spi, pos, n,
&xfrm6_tunnel_spi_byaddr[xfrm6_tunnel_spi_hash_byaddr(saddr)],
list_byaddr)
{
if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
if (atomic_dec_and_test(&x6spi->refcnt)) {
hlist_del(&x6spi->list_byaddr);
hlist_del(&x6spi->list_byspi);
kmem_cache_free(xfrm6_tunnel_spi_kmem, x6spi);
break;
}
}
}
write_unlock_bh(&xfrm6_tunnel_spi_lock);
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
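Record 16,727 pairs hlist_for_each_entry_safe with atomic_dec_and_test so an SPI node is unlinked and freed only on the final put, under the same write lock that allocations take. A userspace sketch of that drop-ref-then-unlink shape on a plain singly linked list; the types are illustrative, not the kernel's hlist:

    #include <stdio.h>
    #include <stdlib.h>

    struct spi_node { int refcnt; struct spi_node *next; };

    /* Drop one reference; unlink and free only when it was the last.
     * The caller holds the same write lock that insertions take. */
    static void put_node(struct spi_node **head, struct spi_node *n)
    {
        if (--n->refcnt)                     /* atomic_dec_and_test analogue */
            return;
        for (struct spi_node **pp = head; *pp; pp = &(*pp)->next) {
            if (*pp == n) {
                *pp = n->next;               /* hlist_del analogue           */
                free(n);                     /* kmem_cache_free analogue     */
                return;
            }
        }
    }

    int main(void)
    {
        struct spi_node *n = malloc(sizeof(*n));
        n->refcnt = 2;
        n->next = NULL;
        struct spi_node *head = n;
        put_node(&head, n);                  /* refcnt 2 -> 1: node stays    */
        put_node(&head, n);                  /* refcnt 1 -> 0: unlink + free */
        printf("list empty: %d\n", head == NULL);   /* prints 1 */
        return 0;
    }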
16,728 | static int __init xfrm6_tunnel_init(void)
{
if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
return -EAGAIN;
if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) {
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) {
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
if (xfrm6_tunnel_spi_init() < 0) {
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
return 0;
}
| DoS | 0 | static int __init xfrm6_tunnel_init(void)
{
if (xfrm_register_type(&xfrm6_tunnel_type, AF_INET6) < 0)
return -EAGAIN;
if (xfrm6_tunnel_register(&xfrm6_tunnel_handler, AF_INET6)) {
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
if (xfrm6_tunnel_register(&xfrm46_tunnel_handler, AF_INET)) {
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
if (xfrm6_tunnel_spi_init() < 0) {
xfrm6_tunnel_deregister(&xfrm46_tunnel_handler, AF_INET);
xfrm6_tunnel_deregister(&xfrm6_tunnel_handler, AF_INET6);
xfrm_unregister_type(&xfrm6_tunnel_type, AF_INET6);
return -EAGAIN;
}
return 0;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
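xfrm6_tunnel_init above unwinds by repeating the accumulated teardown calls at each failure site, so every new registration step makes each later error branch one line longer. A sketch of the equivalent goto ladder, the usual kernel idiom that states each teardown exactly once; the step/undo names are stand-ins for the register/unregister pairs above:

    #include <stdio.h>

    static int step_a(void) { return 0; }    /* xfrm_register_type stand-in */
    static int step_b(void) { return 0; }    /* first tunnel_register       */
    static int step_c(void) { return -1; }   /* fails, to show the unwind   */
    static void undo_b(void) { puts("undo b"); }
    static void undo_a(void) { puts("undo a"); }

    static int init_sketch(void)
    {
        if (step_a())
            return -1;
        if (step_b())
            goto out_a;
        if (step_c())
            goto out_b;
        return 0;
    out_b:
        undo_b();                            /* falls through to out_a      */
    out_a:
        undo_a();
        return -1;
    }

    int main(void) { return init_sketch() ? 1 : 0; }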
16,729 | static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
if (x->props.mode != XFRM_MODE_TUNNEL)
return -EINVAL;
if (x->encap)
return -EINVAL;
x->props.header_len = sizeof(struct ipv6hdr);
return 0;
}
| DoS | 0 | static int xfrm6_tunnel_init_state(struct xfrm_state *x)
{
if (x->props.mode != XFRM_MODE_TUNNEL)
return -EINVAL;
if (x->encap)
return -EINVAL;
x->props.header_len = sizeof(struct ipv6hdr);
return 0;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,730 | static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return 0;
}
| DoS | 0 | static int xfrm6_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
return 0;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,731 | static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *top_iph;
top_iph = (struct ipv6hdr *)skb->data;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
return 0;
}
| DoS | 0 | static int xfrm6_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
{
struct ipv6hdr *top_iph;
top_iph = (struct ipv6hdr *)skb->data;
top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
return 0;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,732 | static void xfrm6_tunnel_spi_fini(void)
{
int i;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
return;
}
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
return;
}
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
xfrm6_tunnel_spi_kmem = NULL;
}
| DoS | 0 | static void xfrm6_tunnel_spi_fini(void)
{
int i;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byaddr[i]))
return;
}
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++) {
if (!hlist_empty(&xfrm6_tunnel_spi_byspi[i]))
return;
}
kmem_cache_destroy(xfrm6_tunnel_spi_kmem);
xfrm6_tunnel_spi_kmem = NULL;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,733 | static int xfrm6_tunnel_spi_init(void)
{
int i;
xfrm6_tunnel_spi = 0;
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
return 0;
}
| DoS | 0 | static int xfrm6_tunnel_spi_init(void)
{
int i;
xfrm6_tunnel_spi = 0;
xfrm6_tunnel_spi_kmem = kmem_cache_create("xfrm6_tunnel_spi",
sizeof(struct xfrm6_tunnel_spi),
0, SLAB_HWCACHE_ALIGN,
NULL, NULL);
if (!xfrm6_tunnel_spi_kmem)
return -ENOMEM;
for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byaddr[i]);
for (i = 0; i < XFRM6_TUNNEL_SPI_BYSPI_HSIZE; i++)
INIT_HLIST_HEAD(&xfrm6_tunnel_spi_byspi[i]);
return 0;
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,734 | __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
read_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
spi = x6spi ? x6spi->spi : 0;
read_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
| DoS | 0 | __be32 xfrm6_tunnel_spi_lookup(xfrm_address_t *saddr)
{
struct xfrm6_tunnel_spi *x6spi;
u32 spi;
read_lock_bh(&xfrm6_tunnel_spi_lock);
x6spi = __xfrm6_tunnel_spi_lookup(saddr);
spi = x6spi ? x6spi->spi : 0;
read_unlock_bh(&xfrm6_tunnel_spi_lock);
return htonl(spi);
}
| @@ -261,7 +261,7 @@ static int xfrm6_tunnel_rcv(struct sk_buff *skb)
__be32 spi;
spi = xfrm6_tunnel_spi_lookup((xfrm_address_t *)&iph->saddr);
- return xfrm6_rcv_spi(skb, spi);
+ return xfrm6_rcv_spi(skb, spi) > 0 ? : 0;
}
static int xfrm6_tunnel_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | CWE-399 | null | null |
16,735 | cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timespec ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j < info[i].pi_str.s_len - 1; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
if (tp < 1000000000000000LL) {
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
| DoS Overflow | 0 | cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
{
cdf_timestamp_t tp;
struct timespec ts;
char buf[64];
size_t i, j;
for (i = 0; i < count; i++) {
cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
(void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
switch (info[i].pi_type) {
case CDF_NULL:
break;
case CDF_SIGNED16:
(void)fprintf(stderr, "signed 16 [%hd]\n",
info[i].pi_s16);
break;
case CDF_SIGNED32:
(void)fprintf(stderr, "signed 32 [%d]\n",
info[i].pi_s32);
break;
case CDF_UNSIGNED32:
(void)fprintf(stderr, "unsigned 32 [%u]\n",
info[i].pi_u32);
break;
case CDF_FLOAT:
(void)fprintf(stderr, "float [%g]\n",
info[i].pi_f);
break;
case CDF_DOUBLE:
(void)fprintf(stderr, "double [%g]\n",
info[i].pi_d);
break;
case CDF_LENGTH32_STRING:
(void)fprintf(stderr, "string %u [%.*s]\n",
info[i].pi_str.s_len,
info[i].pi_str.s_len, info[i].pi_str.s_buf);
break;
case CDF_LENGTH32_WSTRING:
(void)fprintf(stderr, "string %u [",
info[i].pi_str.s_len);
for (j = 0; j < info[i].pi_str.s_len - 1; j++)
(void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
(void)fprintf(stderr, "]\n");
break;
case CDF_FILETIME:
tp = info[i].pi_tp;
if (tp < 1000000000000000LL) {
cdf_print_elapsed_time(buf, sizeof(buf), tp);
(void)fprintf(stderr, "timestamp %s\n", buf);
} else {
cdf_timestamp_to_timespec(&ts, tp);
(void)fprintf(stderr, "timestamp %s",
cdf_ctime(&ts.tv_sec));
}
break;
case CDF_CLIPBOARD:
(void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
break;
default:
DPRINTF(("Don't know how to deal with %x\n",
info[i].pi_type));
break;
}
}
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
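The cdf.c patch attached to these records hoists each untrusted property offset into a local (`ofs`), derives the entry pointer from it, and rejects the entry when the pointer runs past the end of the already-validated stream (`q > e`). A sketch of that style of check on a raw buffer, assuming the caller has validated `stream_len` itself; the 2 * sizeof(uint32_t) rewind mirrors the entry-header adjustment in the diff:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Return a pointer to the property entry at `ofs`, or NULL when the
     * offset (an attacker-controlled 32-bit field) would escape the
     * stream. `need` is how many bytes the caller will read there. */
    static const uint8_t *prop_entry(const uint8_t *base, size_t stream_len,
                                     uint32_t ofs, size_t need)
    {
        if (ofs < 2 * sizeof(uint32_t))           /* entry header underflow */
            return NULL;
        if (ofs > stream_len || stream_len - ofs < need)
            return NULL;                          /* would run off the end  */
        return base + ofs - 2 * sizeof(uint32_t);
    }

    int main(void)
    {
        uint8_t buf[64] = { 0 };
        assert(prop_entry(buf, sizeof(buf), 16, 8) != NULL);
        assert(prop_entry(buf, sizeof(buf), 4,  8) == NULL); /* underflow */
        assert(prop_entry(buf, sizeof(buf), 60, 8) == NULL); /* overrun   */
        return 0;
    }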
16,736 | cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
| DoS Overflow | 0 | cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
{
size_t i, j, s = size / sizeof(cdf_secid_t);
for (i = 0; i < sat->sat_len; i++) {
(void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
SIZE_T_FORMAT "u: ", prefix, i, i * s);
for (j = 0; j < s; j++) {
(void)fprintf(stderr, "%5d, ",
CDF_TOLE4(sat->sat_tab[s * i + j]));
if ((j + 1) % 10 == 0)
(void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
"u: ", i * s + j + 1);
}
(void)fprintf(stderr, "\n");
}
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,737 | cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
| DoS Overflow | 0 | cdf_dump_stream(const cdf_header_t *h, const cdf_stream_t *sst)
{
size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
cdf_dump(sst->sst_tab, ss * sst->sst_len);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
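cdf_dump_stream above already makes the granularity choice the patch retrofits into cdf_read_short_sector: streams shorter than h_min_size_standard_stream live in short sectors, everything else in standard ones, and each size is two raised to a header field. A sketch of that selection with illustrative struct fields; the real macros are CDF_SEC_SIZE and CDF_SHORT_SEC_SIZE:

    #include <stdint.h>
    #include <stddef.h>

    struct hdr_sketch {
        uint16_t sec_size_p2;        /* h_sec_size_p2               */
        uint16_t short_sec_size_p2;  /* h_short_sec_size_p2         */
        uint32_t min_std_stream;     /* h_min_size_standard_stream  */
    };

    static size_t sector_size_for(const struct hdr_sketch *h, size_t dirlen)
    {
        size_t ss  = (size_t)1 << h->sec_size_p2;        /* standard */
        size_t sss = (size_t)1 << h->short_sec_size_p2;  /* short    */
        return dirlen < h->min_std_stream ? sss : ss;
    }

    int main(void)
    {
        struct hdr_sketch h = { 9, 6, 4096 };   /* 512-byte / 64-byte */
        return sector_size_for(&h, 100) == 64 ? 0 : 1;
    }

Mixing the two sizes up is exactly the bug the diff fixes: computing a short-sector position with the standard sector size both misplaces the read and miscomputes the bound it is checked against.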
16,738 | cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
| DoS Overflow | 0 | cdf_getuint32(const uint8_t *p, size_t offs)
{
uint32_t rv;
(void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
return CDF_TOLE4(rv);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
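cdf_getuint32 above goes through memcpy so misaligned offsets cannot fault, then byte-swaps with CDF_TOLE4. A sketch of the same read assembled byte-by-byte, which is alignment- and endian-independent without any swap macro:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    static uint32_t get_le32(const uint8_t *p, size_t idx)
    {
        p += idx * sizeof(uint32_t);      /* offs is in 32-bit units */
        return (uint32_t)p[0]
             | (uint32_t)p[1] << 8
             | (uint32_t)p[2] << 16
             | (uint32_t)p[3] << 24;
    }

    int main(void)
    {
        const uint8_t buf[] = { 0x78, 0x56, 0x34, 0x12 };
        assert(get_le32(buf, 0) == 0x12345678u);
        return 0;
    }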
16,739 | cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
| DoS Overflow | 0 | cdf_namecmp(const char *d, const uint16_t *s, size_t l)
{
for (; l--; d++, s++)
if (*d != CDF_TOLE2(*s))
return (unsigned char)*d - CDF_TOLE2(*s);
return 0;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,740 | cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
| DoS Overflow | 0 | cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
{
return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
"%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
id->cl_six[5]);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,741 | cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
| DoS Overflow | 0 | cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
{
int len = 0;
int days, hours, mins, secs;
ts /= CDF_TIME_PREC;
secs = (int)(ts % 60);
ts /= 60;
mins = (int)(ts % 60);
ts /= 60;
hours = (int)(ts % 24);
ts /= 24;
days = (int)ts;
if (days) {
len += snprintf(buf + len, bufsiz - len, "%dd+", days);
if ((size_t)len >= bufsiz)
return len;
}
if (days || hours) {
len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
if ((size_t)len >= bufsiz)
return len;
}
len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
if ((size_t)len >= bufsiz)
return len;
len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
return len;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
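cdf_print_elapsed_time above re-tests `(size_t)len >= bufsiz` after every snprintf because snprintf returns the length the output would have had, not what was written; once the buffer fills, `bufsiz - len` would wrap as an unsigned size. A sketch of the guard factored into one helper:

    #include <stdio.h>
    #include <string.h>

    /* Append `s`, tracking the running would-be length; once the
     * buffer is full, stop writing but keep the logical length. */
    static int append(char *buf, size_t bufsiz, int len, const char *s)
    {
        if ((size_t)len >= bufsiz)
            return len + (int)strlen(s);     /* already truncated */
        return len + snprintf(buf + len, bufsiz - len, "%s", s);
    }

    int main(void)
    {
        char buf[8];
        int len = 0;
        len = append(buf, sizeof(buf), len, "12d+");
        len = append(buf, sizeof(buf), len, "03:");
        len = append(buf, sizeof(buf), len, "25:07");
        printf("%s (logical %d)\n", buf, len);  /* 12d+03: (logical 12) */
        return 0;
    }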
16,742 | cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
| DoS Overflow | 0 | cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_dir_t *dir)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h), ns, nd;
char *buf;
cdf_secid_t sid = h->h_secid_first_directory;
ns = cdf_count_chain(sat, sid, ss);
if (ns == (size_t)-1)
return -1;
nd = ss / CDF_DIRECTORY_SIZE;
dir->dir_len = ns * nd;
dir->dir_tab = CAST(cdf_directory_t *,
calloc(dir->dir_len, sizeof(dir->dir_tab[0])));
if (dir->dir_tab == NULL)
return -1;
if ((buf = CAST(char *, malloc(ss))) == NULL) {
free(dir->dir_tab);
return -1;
}
for (j = i = 0; i < ns; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read dir loop limit"));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, buf, 0, ss, h, sid) != (ssize_t)ss) {
DPRINTF(("Reading directory sector %d", sid));
goto out;
}
for (j = 0; j < nd; j++) {
cdf_unpack_dir(&dir->dir_tab[i * nd + j],
&buf[j * CDF_DIRECTORY_SIZE]);
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
if (NEED_SWAP)
for (i = 0; i < dir->dir_len; i++)
cdf_swap_dir(&dir->dir_tab[i]);
free(buf);
return 0;
out:
free(dir->dir_tab);
free(buf);
return -1;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
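cdf_read_dir above caps the sector walk with CDF_LOOP_LIMIT so a crafted SAT chain that loops back on itself cannot spin forever. A sketch of the guard on a toy next[] table; the limit value here is arbitrary:

    #include <stdio.h>

    #define LOOP_LIMIT 32

    static int walk_chain(const int *next, int sid)
    {
        int hops = 0;
        while (sid >= 0) {
            if (hops++ >= LOOP_LIMIT)
                return -1;                /* cycle: treat as corrupt */
            sid = next[sid];
        }
        return hops;
    }

    int main(void)
    {
        int good[] = { 1, 2, -2 };        /* -2 marks end of chain    */
        int bad[]  = { 1, 0 };            /* 0 -> 1 -> 0 -> ... cycle */
        printf("%d %d\n", walk_chain(good, 0), walk_chain(bad, 0));
        return 0;                         /* prints: 3 -1             */
    }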
16,743 | cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
| DoS Overflow | 0 | cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
{
char buf[512];
(void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
if (cdf_read(info, (off_t)0, buf, sizeof(buf)) == -1)
return -1;
cdf_unpack_header(h, buf);
cdf_swap_header(h);
if (h->h_magic != CDF_MAGIC) {
DPRINTF(("Bad magic 0x%" INT64_T_FORMAT "x != 0x%"
INT64_T_FORMAT "x\n",
(unsigned long long)h->h_magic,
(unsigned long long)CDF_MAGIC));
goto out;
}
if (h->h_sec_size_p2 > 20) {
DPRINTF(("Bad sector size 0x%u\n", h->h_sec_size_p2));
goto out;
}
if (h->h_short_sec_size_p2 > 20) {
DPRINTF(("Bad short sector size 0x%u\n",
h->h_short_sec_size_p2));
goto out;
}
return 0;
out:
errno = EFTYPE;
return -1;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
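The header validation above rejects either power-of-two size field when it exceeds 20, which caps the derived sector size at 1 << 20 (1 MiB) and keeps later `ss * count` arithmetic well away from size_t overflow; failures set errno to EFTYPE, the BSD "inappropriate file type or format" code. A sketch of the checks; the magic constant below is the OLE2 signature bytes read little-endian, stated here as an assumption rather than quoted from cdf.h:

    #include <stdint.h>

    #define MAGIC_SKETCH 0xe11ab1a1e011cfd0ULL  /* assumed CDF_MAGIC value */

    struct hdr_fields { uint64_t magic; uint16_t sec_p2, short_sec_p2; };

    static int header_ok(const struct hdr_fields *h)
    {
        if (h->magic != MAGIC_SKETCH)
            return 0;
        if (h->sec_p2 > 20 || h->short_sec_p2 > 20)
            return 0;                 /* 1 << 20 == 1 MiB ceiling */
        return 1;
    }

    int main(void)
    {
        struct hdr_fields h = { MAGIC_SKETCH, 9, 6 };
        return header_ok(&h) ? 0 : 1;
    }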
16,744 | cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
| DoS Overflow | 0 | cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
const cdf_header_t *h, cdf_secid_t id)
{
size_t ss = CDF_SEC_SIZE(h);
size_t pos = CDF_SEC_POS(h, id);
assert(ss == len);
return cdf_read(info, (off_t)pos, ((char *)buf) + offs, len);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,745 | cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
| DoS Overflow | 0 | cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
scn);
else
return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,746 | cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
| DoS Overflow | 0 | cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, cdf_sat_t *ssat)
{
size_t i, j;
size_t ss = CDF_SEC_SIZE(h);
cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
ssat->sat_len = cdf_count_chain(sat, sid, CDF_SEC_SIZE(h));
if (ssat->sat_len == (size_t)-1)
return -1;
ssat->sat_tab = CAST(cdf_secid_t *, calloc(ssat->sat_len, ss));
if (ssat->sat_tab == NULL)
return -1;
for (j = i = 0; sid >= 0; i++, j++) {
if (j >= CDF_LOOP_LIMIT) {
DPRINTF(("Read short sat sector loop limit"));
errno = EFTYPE;
goto out;
}
if (i >= ssat->sat_len) {
DPRINTF(("Out of bounds reading short sector chain "
"%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
ssat->sat_len));
errno = EFTYPE;
goto out;
}
if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
(ssize_t)ss) {
DPRINTF(("Reading short sat sector %d", sid));
goto out;
}
sid = CDF_TOLE4((uint32_t)sat->sat_tab[sid]);
}
return 0;
out:
free(ssat->sat_tab);
return -1;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
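cdf_read_ssat above carries two independent guards: the CDF_LOOP_LIMIT cap sketched earlier, and a check that the running index stays below the chain length computed up front, so a chain that turns out longer than cdf_count_chain promised cannot write past the calloc'd table. A sketch of that second guard:

    #include <stddef.h>

    /* Store v at tab[i] only when i is inside the precomputed length. */
    static int bounded_store(int *tab, size_t tab_len, size_t i, int v)
    {
        if (i >= tab_len)
            return -1;                 /* chain longer than advertised */
        tab[i] = v;
        return 0;
    }

    int main(void)
    {
        int tab[4] = { 0 };
        return bounded_store(tab, 4, 2, 7) == 0 &&
               bounded_store(tab, 4, 4, 7) == -1 ? 0 : 1;
    }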
16,747 | cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
static const char name[] = "\05SummaryInformation";
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name))
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find summary information section\n"));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
| DoS Overflow | 0 | cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
const cdf_dir_t *dir, cdf_stream_t *scn)
{
size_t i;
const cdf_directory_t *d;
static const char name[] = "\05SummaryInformation";
for (i = dir->dir_len; i > 0; i--)
if (dir->dir_tab[i - 1].d_type == CDF_DIR_TYPE_USER_STREAM &&
cdf_namecmp(name, dir->dir_tab[i - 1].d_name, sizeof(name))
== 0)
break;
if (i == 0) {
DPRINTF(("Cannot find summary information section\n"));
errno = ESRCH;
return -1;
}
d = &dir->dir_tab[i - 1];
return cdf_read_sector_chain(info, h, sat, ssat, sst,
d->d_stream_first_sector, d->d_size, scn);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,748 | cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
| DoS Overflow | 0 | cdf_swap_dir(cdf_directory_t *d)
{
d->d_namelen = CDF_TOLE2(d->d_namelen);
d->d_left_child = CDF_TOLE4((uint32_t)d->d_left_child);
d->d_right_child = CDF_TOLE4((uint32_t)d->d_right_child);
d->d_storage = CDF_TOLE4((uint32_t)d->d_storage);
d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
d->d_flags = CDF_TOLE4(d->d_flags);
d->d_created = CDF_TOLE8((uint64_t)d->d_created);
d->d_modified = CDF_TOLE8((uint64_t)d->d_modified);
d->d_stream_first_sector = CDF_TOLE4((uint32_t)d->d_stream_first_sector);
d->d_size = CDF_TOLE4(d->d_size);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
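cdf_swap_dir() converts every on-disk little-endian field to host order through the CDF_TOLE2/4/8 macros before any of the values are trusted. A self-contained sketch of that conversion idea, where a runtime endianness probe stands in for the NEED_SWAP test used by the macros above:

#include <stdint.h>
#include <stdio.h>

/*
 * Minimal stand-ins for the CDF_TOLE* idea: convert a little-endian
 * on-disk value to host order. On a little-endian host these are
 * no-ops; on a big-endian host the bytes are swapped.
 */
static uint16_t tole2(uint16_t v)
{
    const union { uint16_t u; uint8_t b[2]; } probe = { 0x0102 };
    if (probe.b[0] == 0x02)               /* little-endian host: no swap */
        return v;
    return (uint16_t)((v >> 8) | (v << 8));
}

static uint32_t tole4(uint32_t v)
{
    const union { uint16_t u; uint8_t b[2]; } probe = { 0x0102 };
    if (probe.b[0] == 0x02)
        return v;
    return (v >> 24) | ((v >> 8) & 0x0000ff00u) |
        ((v << 8) & 0x00ff0000u) | (v << 24);
}

int main(void)
{
    printf("%04x %08x\n", (unsigned)tole2(0x1234),
        (unsigned)tole4(0x12345678u));
    return 0;
}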
16,749 | cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
| DoS Overflow | 0 | cdf_tole2(uint16_t sv)
{
return CDF_TOLE2(sv);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,750 | cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
| DoS Overflow | 0 | cdf_tole8(uint64_t sv)
{
return CDF_TOLE8(sv);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,751 | cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
| DoS Overflow | 0 | cdf_unpack_dir(cdf_directory_t *d, char *buf)
{
size_t len = 0;
CDF_UNPACKA(d->d_name);
CDF_UNPACK(d->d_namelen);
CDF_UNPACK(d->d_type);
CDF_UNPACK(d->d_color);
CDF_UNPACK(d->d_left_child);
CDF_UNPACK(d->d_right_child);
CDF_UNPACK(d->d_storage);
CDF_UNPACKA(d->d_storage_uuid);
CDF_UNPACK(d->d_flags);
CDF_UNPACK(d->d_created);
CDF_UNPACK(d->d_modified);
CDF_UNPACK(d->d_stream_first_sector);
CDF_UNPACK(d->d_size);
CDF_UNPACK(d->d_unused0);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
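cdf_unpack_dir() relies on the CDF_UNPACK/CDF_UNPACKA macros, which memcpy each field out of the raw buffer while advancing a running len cursor, so host struct padding never leaks into the parse. A sketch of the same pattern with an illustrative struct; like the originals, UNPACK binds buf and len from the enclosing function, and the bytes here are little-endian (the real code byte-swaps the fields afterwards):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy one field out of the raw buffer and advance the cursor. */
#define UNPACK(fld) \
    (memcpy(&(fld), buf + len, sizeof(fld)), len += sizeof(fld))

struct sample {
    uint16_t a;       /* the compiler may pad after this member... */
    uint32_t b;       /* ...which is why we never memcpy the whole struct */
    uint64_t c;
};

static void unpack_sample(struct sample *s, const unsigned char *buf)
{
    size_t len = 0;
    UNPACK(s->a);
    UNPACK(s->b);
    UNPACK(s->c);
}

int main(void)
{
    unsigned char raw[14] = { 1, 0, 2, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0 };
    struct sample s;
    unpack_sample(&s, raw);
    printf("%u %u %llu\n", (unsigned)s.a, (unsigned)s.b,
        (unsigned long long)s.c);   /* 1 2 3 on a little-endian host */
    return 0;
}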
16,752 | cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
| DoS Overflow | 0 | cdf_unpack_header(cdf_header_t *h, char *buf)
{
size_t i;
size_t len = 0;
CDF_UNPACK(h->h_magic);
CDF_UNPACKA(h->h_uuid);
CDF_UNPACK(h->h_revision);
CDF_UNPACK(h->h_version);
CDF_UNPACK(h->h_byte_order);
CDF_UNPACK(h->h_sec_size_p2);
CDF_UNPACK(h->h_short_sec_size_p2);
CDF_UNPACKA(h->h_unused0);
CDF_UNPACK(h->h_num_sectors_in_sat);
CDF_UNPACK(h->h_secid_first_directory);
CDF_UNPACKA(h->h_unused1);
CDF_UNPACK(h->h_min_size_standard_stream);
CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
CDF_UNPACK(h->h_num_sectors_in_short_sat);
CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
CDF_UNPACK(h->h_num_sectors_in_master_sat);
for (i = 0; i < __arraycount(h->h_master_sat); i++)
CDF_UNPACK(h->h_master_sat[i]);
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
16,753 | cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t i, maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE2(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
for (i = 0; i < CDF_TOLE4(si->si_count); i++) {
if (i >= CDF_LOOP_LIMIT) {
DPRINTF(("Unpack summary info loop limit"));
errno = EFTYPE;
return -1;
}
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
info, count, &maxcount) == -1) {
return -1;
}
}
return 0;
}
| DoS Overflow | 0 | cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
{
size_t i, maxcount;
const cdf_summary_info_header_t *si =
CAST(const cdf_summary_info_header_t *, sst->sst_tab);
const cdf_section_declaration_t *sd =
CAST(const cdf_section_declaration_t *, (const void *)
((const char *)sst->sst_tab + CDF_SECTION_DECLARATION_OFFSET));
if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
return -1;
ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
ssi->si_os_version = CDF_TOLE2(si->si_os_version);
ssi->si_os = CDF_TOLE2(si->si_os);
ssi->si_class = si->si_class;
cdf_swap_class(&ssi->si_class);
ssi->si_count = CDF_TOLE2(si->si_count);
*count = 0;
maxcount = 0;
*info = NULL;
for (i = 0; i < CDF_TOLE4(si->si_count); i++) {
if (i >= CDF_LOOP_LIMIT) {
DPRINTF(("Unpack summary info loop limit"));
errno = EFTYPE;
return -1;
}
if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset),
info, count, &maxcount) == -1) {
return -1;
}
}
return 0;
}
| @@ -35,7 +35,7 @@
#include "file.h"
#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.48 2012/02/17 05:27:45 christos Exp $")
+FILE_RCSID("@(#)$File: cdf.c,v 1.49 2012/02/20 20:04:37 christos Exp $")
#endif
#include <assert.h>
@@ -75,6 +75,7 @@ static union {
#define CDF_TOLE2(x) ((uint16_t)(NEED_SWAP ? _cdf_tole2(x) : (uint16_t)(x)))
#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
+
/*
* swap a short
*/
@@ -351,13 +352,13 @@ ssize_t
cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
size_t len, const cdf_header_t *h, cdf_secid_t id)
{
- size_t ss = CDF_SEC_SIZE(h);
+ size_t ss = CDF_SHORT_SEC_SIZE(h);
size_t pos = CDF_SHORT_SEC_POS(h, id);
assert(ss == len);
- if (pos > ss * sst->sst_len) {
+ if (pos > CDF_SEC_SIZE(h) * sst->sst_len) {
DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
SIZE_T_FORMAT "u\n",
- pos, ss * sst->sst_len));
+ pos, CDF_SEC_SIZE(h) * sst->sst_len));
return -1;
}
(void)memcpy(((char *)buf) + offs,
@@ -529,7 +530,7 @@ cdf_read_short_sector_chain(const cdf_header_t *h,
const cdf_sat_t *ssat, const cdf_stream_t *sst,
cdf_secid_t sid, size_t len, cdf_stream_t *scn)
{
- size_t ss = CDF_SEC_SIZE(h), i, j;
+ size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
scn->sst_dirlen = len;
@@ -798,18 +799,18 @@ cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
if (cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
goto out;
for (i = 0; i < sh.sh_properties; i++) {
+ size_t ofs = CDF_GETUINT32(p, (i << 1) + 1);
q = (const uint8_t *)(const void *)
- ((const char *)(const void *)p +
- CDF_GETUINT32(p, (i << 1) + 1)) - 2 * sizeof(uint32_t);
+ ((const char *)(const void *)p + ofs
+ - 2 * sizeof(uint32_t));
if (q > e) {
DPRINTF(("Ran of the end %p > %p\n", q, e));
goto out;
}
inp[i].pi_id = CDF_GETUINT32(p, i << 1);
inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n", i,
- inp[i].pi_id, inp[i].pi_type, q - p,
- CDF_GETUINT32(p, (i << 1) + 1)));
+ DPRINTF(("%" SIZE_T_FORMAT "u) id=%x type=%x offs=0x%tx,0x%x\n",
+ i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
if (inp[i].pi_type & CDF_VECTOR) {
nelements = CDF_GETUINT32(q, 1);
o = 2; | CWE-119 | null | null |
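In cdf_unpack_summary_info() the iteration count si_count comes straight from the file, so the loop is capped at CDF_LOOP_LIMIT before any per-entry work happens; a hostile count can then waste only a bounded amount of effort. A minimal sketch of that guard, with EINVAL standing in for the BSD-specific EFTYPE:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define LOOP_LIMIT 1000   /* illustrative cap, like CDF_LOOP_LIMIT */

/*
 * `count` is attacker-controlled (read from the file), so cap the
 * number of iterations before doing per-entry parsing.
 */
static int process_entries(uint32_t count)
{
    for (uint32_t i = 0; i < count; i++) {
        if (i >= LOOP_LIMIT) {
            errno = EINVAL;   /* the original sets EFTYPE */
            return -1;
        }
        /* ... per-entry parsing would go here ... */
    }
    return 0;
}

int main(void)
{
    printf("%d\n", process_entries(10));          /* 0  */
    printf("%d\n", process_entries(0xffffffffu)); /* -1 */
    return 0;
}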
16,754 | static inline void clear_AC(struct kernel_vm86_regs *regs)
{
regs->pt.flags &= ~X86_EFLAGS_AC;
}
| DoS | 0 | static inline void clear_AC(struct kernel_vm86_regs *regs)
{
regs->pt.flags &= ~X86_EFLAGS_AC;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
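The CWE-264 patch attached to this and the following vm86 records is the same two-line change each time: mark_screen_rdonly() walks and rewrites page tables, so it must hold mm->mmap_sem for writing, otherwise concurrent faults or khugepaged can change the tables under it. A rough userspace analogy using a pthreads rwlock — not kernel code; the array stands in for the page tables; build with -pthread:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the page tables mark_screen_rdonly() rewrites. */
static int table[32];
static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Writer: the equivalent of taking mmap_sem for write around the walk. */
static void mark_readonly_range(int from, int to)
{
    pthread_rwlock_wrlock(&table_lock);
    for (int i = from; i < to; i++)
        table[i] |= 1;            /* flip the "read-only" bit */
    pthread_rwlock_unlock(&table_lock);
}

/* Reader: anything that merely inspects the table takes it shared. */
static int is_readonly(int i)
{
    pthread_rwlock_rdlock(&table_lock);
    int ro = table[i] & 1;
    pthread_rwlock_unlock(&table_lock);
    return ro;
}

int main(void)
{
    mark_readonly_range(0, 16);
    printf("%d %d\n", is_readonly(3), is_readonly(20));  /* 1 0 */
    return 0;
}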
16,755 | static inline void clear_TF(struct kernel_vm86_regs *regs)
{
regs->pt.flags &= ~X86_EFLAGS_TF;
}
| DoS | 0 | static inline void clear_TF(struct kernel_vm86_regs *regs)
{
regs->pt.flags &= ~X86_EFLAGS_TF;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,756 | static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
const struct vm86_regs __user *user,
unsigned extra)
{
int ret = 0;
/* copy ax-fs inclusive */
ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
/* copy orig_ax-__gsh+extra */
ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
sizeof(struct kernel_vm86_regs) -
offsetof(struct kernel_vm86_regs, pt.orig_ax) +
extra);
return ret;
}
| DoS | 0 | static int copy_vm86_regs_from_user(struct kernel_vm86_regs *regs,
const struct vm86_regs __user *user,
unsigned extra)
{
int ret = 0;
/* copy ax-fs inclusive */
ret += copy_from_user(regs, user, offsetof(struct kernel_vm86_regs, pt.orig_ax));
/* copy orig_ax-__gsh+extra */
ret += copy_from_user(&regs->pt.orig_ax, &user->orig_eax,
sizeof(struct kernel_vm86_regs) -
offsetof(struct kernel_vm86_regs, pt.orig_ax) +
extra);
return ret;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
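copy_vm86_regs_from_user() copies the register block in two pieces around pt.orig_ax because struct kernel_vm86_regs and the userspace layout diverge there; offsetof() gives the split point. A standalone sketch of that two-piece copy with an illustrative struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct regs {
    long ax, bx, cx;
    long orig_ax;     /* the two layouts diverge from here on */
    long flags;
};

int main(void)
{
    struct regs src = { 1, 2, 3, 4, 5 }, dst;

    /* piece 1: everything before orig_ax */
    memcpy(&dst, &src, offsetof(struct regs, orig_ax));
    /* piece 2: orig_ax through the end of the struct */
    memcpy(&dst.orig_ax, &src.orig_ax,
        sizeof(struct regs) - offsetof(struct regs, orig_ax));

    printf("%ld %ld\n", dst.ax, dst.flags);   /* 1 5 */
    return 0;
}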
16,757 | static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
const struct kernel_vm86_regs *regs)
{
int ret = 0;
/*
* kernel_vm86_regs is missing gs, so copy everything up to
* (but not including) orig_eax, and then rest including orig_eax.
*/
ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
sizeof(struct kernel_vm86_regs) -
offsetof(struct kernel_vm86_regs, pt.orig_ax));
return ret;
}
| DoS | 0 | static int copy_vm86_regs_to_user(struct vm86_regs __user *user,
const struct kernel_vm86_regs *regs)
{
int ret = 0;
/*
* kernel_vm86_regs is missing gs, so copy everything up to
* (but not including) orig_eax, and then rest including orig_eax.
*/
ret += copy_to_user(user, regs, offsetof(struct kernel_vm86_regs, pt.orig_ax));
ret += copy_to_user(&user->orig_eax, &regs->pt.orig_ax,
sizeof(struct kernel_vm86_regs) -
offsetof(struct kernel_vm86_regs, pt.orig_ax));
return ret;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,758 | static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned char __user *ssp, unsigned short sp)
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle;
if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
goto cannot_handle;
intr_ptr = (unsigned long __user *) (i << 2);
if (get_user(segoffs, intr_ptr))
goto cannot_handle;
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
pushw(ssp, sp, regs->pt.cs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
regs->pt.cs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
clear_IF(regs);
clear_AC(regs);
return;
cannot_handle:
return_to_32bit(regs, VM86_INTx + (i << 8));
}
| DoS | 0 | static void do_int(struct kernel_vm86_regs *regs, int i,
unsigned char __user *ssp, unsigned short sp)
{
unsigned long __user *intr_ptr;
unsigned long segoffs;
if (regs->pt.cs == BIOSSEG)
goto cannot_handle;
if (is_revectored(i, &KVM86->int_revectored))
goto cannot_handle;
if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
goto cannot_handle;
intr_ptr = (unsigned long __user *) (i << 2);
if (get_user(segoffs, intr_ptr))
goto cannot_handle;
if ((segoffs >> 16) == BIOSSEG)
goto cannot_handle;
pushw(ssp, sp, get_vflags(regs), cannot_handle);
pushw(ssp, sp, regs->pt.cs, cannot_handle);
pushw(ssp, sp, IP(regs), cannot_handle);
regs->pt.cs = segoffs >> 16;
SP(regs) -= 6;
IP(regs) = segoffs & 0xffff;
clear_TF(regs);
clear_IF(regs);
clear_AC(regs);
return;
cannot_handle:
return_to_32bit(regs, VM86_INTx + (i << 8));
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,759 | static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
struct tss_struct *tss;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
info->regs.pt.ds = 0;
info->regs.pt.es = 0;
info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
info->regs.pt.gs = 0;
#endif
/*
* The flags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
VEFLAGS = info->regs.pt.flags;
info->regs.pt.flags &= SAFE_MASK;
info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
info->regs.pt.flags |= X86_VM_MASK;
switch (info->cpu_type) {
case CPU_286:
tsk->thread.v86mask = 0;
break;
case CPU_386:
tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}
/*
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
info->regs32->ax = VM86_SIGNAL;
tsk->thread.saved_sp0 = tsk->thread.sp0;
tsk->thread.saved_fs = info->regs32->fs;
tsk->thread.saved_gs = get_user_gs(info->regs32);
tss = &per_cpu(init_tss, get_cpu());
tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
put_cpu();
tsk->thread.screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);
/*call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context))
__audit_syscall_exit(1, 0);
#endif
__asm__ __volatile__(
"movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
"mov %2, %%gs\n\t"
#endif
"jmp resume_userspace"
: /* no outputs */
:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
/* we never return here */
}
| DoS | 0 | static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
{
struct tss_struct *tss;
/*
* make sure the vm86() system call doesn't try to do anything silly
*/
info->regs.pt.ds = 0;
info->regs.pt.es = 0;
info->regs.pt.fs = 0;
#ifndef CONFIG_X86_32_LAZY_GS
info->regs.pt.gs = 0;
#endif
/*
* The flags register is also special: we cannot trust that the user
* has set it up safely, so this makes sure interrupt etc flags are
* inherited from protected mode.
*/
VEFLAGS = info->regs.pt.flags;
info->regs.pt.flags &= SAFE_MASK;
info->regs.pt.flags |= info->regs32->flags & ~SAFE_MASK;
info->regs.pt.flags |= X86_VM_MASK;
switch (info->cpu_type) {
case CPU_286:
tsk->thread.v86mask = 0;
break;
case CPU_386:
tsk->thread.v86mask = X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
case CPU_486:
tsk->thread.v86mask = X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
default:
tsk->thread.v86mask = X86_EFLAGS_ID | X86_EFLAGS_AC | X86_EFLAGS_NT | X86_EFLAGS_IOPL;
break;
}
/*
* Save old state, set default return value (%ax) to 0 (VM86_SIGNAL)
*/
info->regs32->ax = VM86_SIGNAL;
tsk->thread.saved_sp0 = tsk->thread.sp0;
tsk->thread.saved_fs = info->regs32->fs;
tsk->thread.saved_gs = get_user_gs(info->regs32);
tss = &per_cpu(init_tss, get_cpu());
tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
if (cpu_has_sep)
tsk->thread.sysenter_cs = 0;
load_sp0(tss, &tsk->thread);
put_cpu();
tsk->thread.screen_bitmap = info->screen_bitmap;
if (info->flags & VM86_SCREEN_BITMAP)
mark_screen_rdonly(tsk->mm);
/*call __audit_syscall_exit since we do not exit via the normal paths */
#ifdef CONFIG_AUDITSYSCALL
if (unlikely(current->audit_context))
__audit_syscall_exit(1, 0);
#endif
__asm__ __volatile__(
"movl %0,%%esp\n\t"
"movl %1,%%ebp\n\t"
#ifdef CONFIG_X86_32_LAZY_GS
"mov %2, %%gs\n\t"
#endif
"jmp resume_userspace"
: /* no outputs */
:"r" (&info->regs), "r" (task_thread_info(tsk)), "r" (0));
/* we never return here */
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,760 | static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
int ret;
switch (subfunction) {
case VM86_GET_AND_RESET_IRQ: {
return get_and_reset_irq(irqnumber);
}
case VM86_GET_IRQ_BITS: {
return irqbits;
}
case VM86_REQUEST_IRQ: {
int sig = irqnumber >> 8;
int irq = irqnumber & 255;
if (!capable(CAP_SYS_ADMIN)) return -EPERM;
if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
if (invalid_vm86_irq(irq)) return -EPERM;
if (vm86_irqs[irq].tsk) return -EPERM;
ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
if (ret) return ret;
vm86_irqs[irq].sig = sig;
vm86_irqs[irq].tsk = current;
return irq;
}
case VM86_FREE_IRQ: {
if (invalid_vm86_irq(irqnumber)) return -EPERM;
if (!vm86_irqs[irqnumber].tsk) return 0;
if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
free_vm86_irq(irqnumber);
return 0;
}
}
return -EINVAL;
}
| DoS | 0 | static int do_vm86_irq_handling(int subfunction, int irqnumber)
{
int ret;
switch (subfunction) {
case VM86_GET_AND_RESET_IRQ: {
return get_and_reset_irq(irqnumber);
}
case VM86_GET_IRQ_BITS: {
return irqbits;
}
case VM86_REQUEST_IRQ: {
int sig = irqnumber >> 8;
int irq = irqnumber & 255;
if (!capable(CAP_SYS_ADMIN)) return -EPERM;
if (!((1 << sig) & ALLOWED_SIGS)) return -EPERM;
if (invalid_vm86_irq(irq)) return -EPERM;
if (vm86_irqs[irq].tsk) return -EPERM;
ret = request_irq(irq, &irq_handler, 0, VM86_IRQNAME, NULL);
if (ret) return ret;
vm86_irqs[irq].sig = sig;
vm86_irqs[irq].tsk = current;
return irq;
}
case VM86_FREE_IRQ: {
if (invalid_vm86_irq(irqnumber)) return -EPERM;
if (!vm86_irqs[irqnumber].tsk) return 0;
if (vm86_irqs[irqnumber].tsk != current) return -EPERM;
free_vm86_irq(irqnumber);
return 0;
}
}
return -EINVAL;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,761 | static inline void free_vm86_irq(int irqnumber)
{
unsigned long flags;
free_irq(irqnumber, NULL);
vm86_irqs[irqnumber].tsk = NULL;
spin_lock_irqsave(&irqbits_lock, flags);
irqbits &= ~(1 << irqnumber);
spin_unlock_irqrestore(&irqbits_lock, flags);
}
| DoS | 0 | static inline void free_vm86_irq(int irqnumber)
{
unsigned long flags;
free_irq(irqnumber, NULL);
vm86_irqs[irqnumber].tsk = NULL;
spin_lock_irqsave(&irqbits_lock, flags);
irqbits &= ~(1 << irqnumber);
spin_unlock_irqrestore(&irqbits_lock, flags);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,762 | static inline int get_and_reset_irq(int irqnumber)
{
int bit;
unsigned long flags;
int ret = 0;
if (invalid_vm86_irq(irqnumber)) return 0;
if (vm86_irqs[irqnumber].tsk != current) return 0;
spin_lock_irqsave(&irqbits_lock, flags);
bit = irqbits & (1 << irqnumber);
irqbits &= ~bit;
if (bit) {
enable_irq(irqnumber);
ret = 1;
}
spin_unlock_irqrestore(&irqbits_lock, flags);
return ret;
}
| DoS | 0 | static inline int get_and_reset_irq(int irqnumber)
{
int bit;
unsigned long flags;
int ret = 0;
if (invalid_vm86_irq(irqnumber)) return 0;
if (vm86_irqs[irqnumber].tsk != current) return 0;
spin_lock_irqsave(&irqbits_lock, flags);
bit = irqbits & (1 << irqnumber);
irqbits &= ~bit;
if (bit) {
enable_irq(irqnumber);
ret = 1;
}
spin_unlock_irqrestore(&irqbits_lock, flags);
return ret;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
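get_and_reset_irq() performs the test-and-clear of the pending bit under irqbits_lock, so a bit set concurrently by irq_handler() cannot be lost between the read and the clear. A userspace sketch of the same pattern, with a mutex standing in for spin_lock_irqsave(); build with -pthread:

#include <pthread.h>
#include <stdio.h>

static unsigned long irqbits;
static pthread_mutex_t irqbits_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Read the pending bit and clear it under the same lock, so the
 * check and the update are one atomic step with respect to a
 * concurrent handler setting bits.
 */
static int get_and_reset(int nr)
{
    int ret = 0;
    pthread_mutex_lock(&irqbits_lock);
    unsigned long bit = irqbits & (1ul << nr);
    irqbits &= ~bit;
    if (bit)
        ret = 1;
    pthread_mutex_unlock(&irqbits_lock);
    return ret;
}

int main(void)
{
    irqbits = 1ul << 5;
    printf("%d %d\n", get_and_reset(5), get_and_reset(5));   /* 1 0 */
    return 0;
}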
16,763 | static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
unsigned long flags = regs->pt.flags & RETURN_MASK;
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
return flags | (VEFLAGS & current->thread.v86mask);
}
| DoS | 0 | static inline unsigned long get_vflags(struct kernel_vm86_regs *regs)
{
unsigned long flags = regs->pt.flags & RETURN_MASK;
if (VEFLAGS & X86_EFLAGS_VIF)
flags |= X86_EFLAGS_IF;
flags |= X86_EFLAGS_IOPL;
return flags | (VEFLAGS & current->thread.v86mask);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,764 | void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
unsigned char opcode;
unsigned char __user *csp;
unsigned char __user *ssp;
unsigned short ip, sp, orig_flags;
int data32, pref_done;
#define CHECK_IF_IN_TRAP \
if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
return_to_32bit(regs, VM86_PICRETURN); \
if (orig_flags & X86_EFLAGS_TF) \
handle_vm86_trap(regs, 0, 1); \
return; } while (0)
orig_flags = *(unsigned short *)&regs->pt.flags;
csp = (unsigned char __user *) (regs->pt.cs << 4);
ssp = (unsigned char __user *) (regs->pt.ss << 4);
sp = SP(regs);
ip = IP(regs);
data32 = 0;
pref_done = 0;
do {
switch (opcode = popb(csp, ip, simulate_sigsegv)) {
case 0x66: /* 32-bit data */ data32 = 1; break;
case 0x67: /* 32-bit address */ break;
case 0x2e: /* CS */ break;
case 0x3e: /* DS */ break;
case 0x26: /* ES */ break;
case 0x36: /* SS */ break;
case 0x65: /* GS */ break;
case 0x64: /* FS */ break;
case 0xf2: /* repnz */ break;
case 0xf3: /* rep */ break;
default: pref_done = 1;
}
} while (!pref_done);
switch (opcode) {
/* pushf */
case 0x9c:
if (data32) {
pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
SP(regs) -= 4;
} else {
pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
SP(regs) -= 2;
}
IP(regs) = ip;
VM86_FAULT_RETURN;
/* popf */
case 0x9d:
{
unsigned long newflags;
if (data32) {
newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 4;
} else {
newflags = popw(ssp, sp, simulate_sigsegv);
SP(regs) += 2;
}
IP(regs) = ip;
CHECK_IF_IN_TRAP;
if (data32)
set_vflags_long(newflags, regs);
else
set_vflags_short(newflags, regs);
VM86_FAULT_RETURN;
}
/* int xx */
case 0xcd: {
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
if (VMPI.vm86dbg_active) {
if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
return_to_32bit(regs, VM86_INTx + (intno << 8));
}
do_int(regs, intno, ssp, sp);
return;
}
/* iret */
case 0xcf:
{
unsigned long newip;
unsigned long newcs;
unsigned long newflags;
if (data32) {
newip = popl(ssp, sp, simulate_sigsegv);
newcs = popl(ssp, sp, simulate_sigsegv);
newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 12;
} else {
newip = popw(ssp, sp, simulate_sigsegv);
newcs = popw(ssp, sp, simulate_sigsegv);
newflags = popw(ssp, sp, simulate_sigsegv);
SP(regs) += 6;
}
IP(regs) = newip;
regs->pt.cs = newcs;
CHECK_IF_IN_TRAP;
if (data32) {
set_vflags_long(newflags, regs);
} else {
set_vflags_short(newflags, regs);
}
VM86_FAULT_RETURN;
}
/* cli */
case 0xfa:
IP(regs) = ip;
clear_IF(regs);
VM86_FAULT_RETURN;
/* sti */
/*
* Damn. This is incorrect: the 'sti' instruction should actually
* enable interrupts after the /next/ instruction. Not good.
*
* Probably needs some horsing around with the TF flag. Aiee..
*/
case 0xfb:
IP(regs) = ip;
set_IF(regs);
VM86_FAULT_RETURN;
default:
return_to_32bit(regs, VM86_UNKNOWN);
}
return;
simulate_sigsegv:
/* FIXME: After a long discussion with Stas we finally
* agreed, that this is wrong. Here we should
* really send a SIGSEGV to the user program.
* But how do we create the correct context? We
* are inside a general protection fault handler
* and has just returned from a page fault handler.
* The correct context for the signal handler
* should be a mixture of the two, but how do we
* get the information? [KD]
*/
return_to_32bit(regs, VM86_UNKNOWN);
}
| DoS | 0 | void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
{
unsigned char opcode;
unsigned char __user *csp;
unsigned char __user *ssp;
unsigned short ip, sp, orig_flags;
int data32, pref_done;
#define CHECK_IF_IN_TRAP \
if (VMPI.vm86dbg_active && VMPI.vm86dbg_TFpendig) \
newflags |= X86_EFLAGS_TF
#define VM86_FAULT_RETURN do { \
if (VMPI.force_return_for_pic && (VEFLAGS & (X86_EFLAGS_IF | X86_EFLAGS_VIF))) \
return_to_32bit(regs, VM86_PICRETURN); \
if (orig_flags & X86_EFLAGS_TF) \
handle_vm86_trap(regs, 0, 1); \
return; } while (0)
orig_flags = *(unsigned short *)&regs->pt.flags;
csp = (unsigned char __user *) (regs->pt.cs << 4);
ssp = (unsigned char __user *) (regs->pt.ss << 4);
sp = SP(regs);
ip = IP(regs);
data32 = 0;
pref_done = 0;
do {
switch (opcode = popb(csp, ip, simulate_sigsegv)) {
case 0x66: /* 32-bit data */ data32 = 1; break;
case 0x67: /* 32-bit address */ break;
case 0x2e: /* CS */ break;
case 0x3e: /* DS */ break;
case 0x26: /* ES */ break;
case 0x36: /* SS */ break;
case 0x65: /* GS */ break;
case 0x64: /* FS */ break;
case 0xf2: /* repnz */ break;
case 0xf3: /* rep */ break;
default: pref_done = 1;
}
} while (!pref_done);
switch (opcode) {
/* pushf */
case 0x9c:
if (data32) {
pushl(ssp, sp, get_vflags(regs), simulate_sigsegv);
SP(regs) -= 4;
} else {
pushw(ssp, sp, get_vflags(regs), simulate_sigsegv);
SP(regs) -= 2;
}
IP(regs) = ip;
VM86_FAULT_RETURN;
/* popf */
case 0x9d:
{
unsigned long newflags;
if (data32) {
newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 4;
} else {
newflags = popw(ssp, sp, simulate_sigsegv);
SP(regs) += 2;
}
IP(regs) = ip;
CHECK_IF_IN_TRAP;
if (data32)
set_vflags_long(newflags, regs);
else
set_vflags_short(newflags, regs);
VM86_FAULT_RETURN;
}
/* int xx */
case 0xcd: {
int intno = popb(csp, ip, simulate_sigsegv);
IP(regs) = ip;
if (VMPI.vm86dbg_active) {
if ((1 << (intno & 7)) & VMPI.vm86dbg_intxxtab[intno >> 3])
return_to_32bit(regs, VM86_INTx + (intno << 8));
}
do_int(regs, intno, ssp, sp);
return;
}
/* iret */
case 0xcf:
{
unsigned long newip;
unsigned long newcs;
unsigned long newflags;
if (data32) {
newip = popl(ssp, sp, simulate_sigsegv);
newcs = popl(ssp, sp, simulate_sigsegv);
newflags = popl(ssp, sp, simulate_sigsegv);
SP(regs) += 12;
} else {
newip = popw(ssp, sp, simulate_sigsegv);
newcs = popw(ssp, sp, simulate_sigsegv);
newflags = popw(ssp, sp, simulate_sigsegv);
SP(regs) += 6;
}
IP(regs) = newip;
regs->pt.cs = newcs;
CHECK_IF_IN_TRAP;
if (data32) {
set_vflags_long(newflags, regs);
} else {
set_vflags_short(newflags, regs);
}
VM86_FAULT_RETURN;
}
/* cli */
case 0xfa:
IP(regs) = ip;
clear_IF(regs);
VM86_FAULT_RETURN;
/* sti */
/*
* Damn. This is incorrect: the 'sti' instruction should actually
* enable interrupts after the /next/ instruction. Not good.
*
* Probably needs some horsing around with the TF flag. Aiee..
*/
case 0xfb:
IP(regs) = ip;
set_IF(regs);
VM86_FAULT_RETURN;
default:
return_to_32bit(regs, VM86_UNKNOWN);
}
return;
simulate_sigsegv:
/* FIXME: After a long discussion with Stas we finally
* agreed, that this is wrong. Here we should
* really send a SIGSEGV to the user program.
* But how do we create the correct context? We
* are inside a general protection fault handler
* and has just returned from a page fault handler.
* The correct context for the signal handler
* should be a mixture of the two, but how do we
* get the information? [KD]
*/
return_to_32bit(regs, VM86_UNKNOWN);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
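handle_vm86_fault() starts with a prefix-decoding loop: it consumes x86 prefix bytes until the first real opcode, remembering whether 0x66 selected 32-bit operands. A self-contained sketch of that loop; it reads from a plain array, whereas the real code fetches guest memory with popb() and can fault:

#include <stdio.h>

/*
 * Consume operand-size, address-size, segment-override, and rep
 * prefixes; return the first non-prefix opcode and report whether
 * 0x66 selected 32-bit operands.
 */
static unsigned char decode(const unsigned char *csp, int *data32)
{
    unsigned char op;
    int done = 0;

    *data32 = 0;
    do {
        switch (op = *csp++) {
        case 0x66: *data32 = 1; break;          /* 32-bit data */
        case 0x67: case 0x2e: case 0x3e:        /* addr, CS, DS */
        case 0x26: case 0x36: case 0x65:        /* ES, SS, GS  */
        case 0x64: case 0xf2: case 0xf3:        /* FS, repnz, rep */
            break;
        default: done = 1;
        }
    } while (!done);
    return op;
}

int main(void)
{
    unsigned char insn[] = { 0x66, 0x2e, 0x9c };    /* prefixed pushf */
    int data32;
    unsigned char op = decode(insn, &data32);
    printf("opcode %02x data32=%d\n", op, data32);  /* 9c, 1 */
    return 0;
}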
16,765 | int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
if (VMPI.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
call save_v86_state() and change the stack pointer
to KVM86->regs32 */
set_thread_flag(TIF_IRET);
return 0;
}
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0;
}
if (trapno != 1)
return 1; /* we let this handle by the calling routine */
current->thread.trap_no = trapno;
current->thread.error_code = error_code;
force_sig(SIGTRAP, current);
return 0;
}
| DoS | 0 | int handle_vm86_trap(struct kernel_vm86_regs *regs, long error_code, int trapno)
{
if (VMPI.is_vm86pus) {
if ((trapno == 3) || (trapno == 1)) {
KVM86->regs32->ax = VM86_TRAP + (trapno << 8);
/* setting this flag forces the code in entry_32.S to
call save_v86_state() and change the stack pointer
to KVM86->regs32 */
set_thread_flag(TIF_IRET);
return 0;
}
do_int(regs, trapno, (unsigned char __user *) (regs->pt.ss << 4), SP(regs));
return 0;
}
if (trapno != 1)
return 1; /* we let this handle by the calling routine */
current->thread.trap_no = trapno;
current->thread.error_code = error_code;
force_sig(SIGTRAP, current);
return 0;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,766 | static irqreturn_t irq_handler(int intno, void *dev_id)
{
int irq_bit;
unsigned long flags;
spin_lock_irqsave(&irqbits_lock, flags);
irq_bit = 1 << intno;
if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
goto out;
irqbits |= irq_bit;
if (vm86_irqs[intno].sig)
send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
/*
* IRQ will be re-enabled when user asks for the irq (whether
* polling or as a result of the signal)
*/
disable_irq_nosync(intno);
spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_HANDLED;
out:
spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_NONE;
}
| DoS | 0 | static irqreturn_t irq_handler(int intno, void *dev_id)
{
int irq_bit;
unsigned long flags;
spin_lock_irqsave(&irqbits_lock, flags);
irq_bit = 1 << intno;
if ((irqbits & irq_bit) || !vm86_irqs[intno].tsk)
goto out;
irqbits |= irq_bit;
if (vm86_irqs[intno].sig)
send_sig(vm86_irqs[intno].sig, vm86_irqs[intno].tsk, 1);
/*
* IRQ will be re-enabled when user asks for the irq (whether
* polling or as a result of the signal)
*/
disable_irq_nosync(intno);
spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_HANDLED;
out:
spin_unlock_irqrestore(&irqbits_lock, flags);
return IRQ_NONE;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,767 | static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
:"=r" (nr)
:"m" (*bitmap), "r" (nr));
return nr;
}
| DoS | 0 | static inline int is_revectored(int nr, struct revectored_struct *bitmap)
{
__asm__ __volatile__("btl %2,%1\n\tsbbl %0,%0"
:"=r" (nr)
:"m" (*bitmap), "r" (nr));
return nr;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,768 | struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
struct tss_struct *tss;
struct pt_regs *ret;
unsigned long tmp;
/*
* This gets called from entry.S with interrupts disabled, but
* from process context. Enable interrupts here, before trying
* to access user space.
*/
local_irq_enable();
if (!current->thread.vm86_info) {
printk("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
if (tmp) {
printk("vm86: could not access userspace vm86_info\n");
do_exit(SIGSEGV);
}
tss = &per_cpu(init_tss, get_cpu());
current->thread.sp0 = current->thread.saved_sp0;
current->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, ¤t->thread);
current->thread.saved_sp0 = 0;
put_cpu();
ret = KVM86->regs32;
ret->fs = current->thread.saved_fs;
set_user_gs(ret, current->thread.saved_gs);
return ret;
}
| DoS | 0 | struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
{
struct tss_struct *tss;
struct pt_regs *ret;
unsigned long tmp;
/*
* This gets called from entry.S with interrupts disabled, but
* from process context. Enable interrupts here, before trying
* to access user space.
*/
local_irq_enable();
if (!current->thread.vm86_info) {
printk("no vm86_info: BAD\n");
do_exit(SIGSEGV);
}
set_flags(regs->pt.flags, VEFLAGS, X86_EFLAGS_VIF | current->thread.v86mask);
tmp = copy_vm86_regs_to_user(&current->thread.vm86_info->regs, regs);
tmp += put_user(current->thread.screen_bitmap, &current->thread.vm86_info->screen_bitmap);
if (tmp) {
printk("vm86: could not access userspace vm86_info\n");
do_exit(SIGSEGV);
}
tss = &per_cpu(init_tss, get_cpu());
current->thread.sp0 = current->thread.saved_sp0;
current->thread.sysenter_cs = __KERNEL_CS;
load_sp0(tss, ¤t->thread);
current->thread.saved_sp0 = 0;
put_cpu();
ret = KVM86->regs32;
ret->fs = current->thread.saved_fs;
set_user_gs(ret, current->thread.saved_gs);
return ret;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,769 | static inline void set_IF(struct kernel_vm86_regs *regs)
{
VEFLAGS |= X86_EFLAGS_VIF;
if (VEFLAGS & X86_EFLAGS_VIP)
return_to_32bit(regs, VM86_STI);
}
| DoS | 0 | static inline void set_IF(struct kernel_vm86_regs *regs)
{
VEFLAGS |= X86_EFLAGS_VIF;
if (VEFLAGS & X86_EFLAGS_VIP)
return_to_32bit(regs, VM86_STI);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,770 | static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
set_flags(VEFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
else
clear_IF(regs);
}
| DoS | 0 | static inline void set_vflags_long(unsigned long flags, struct kernel_vm86_regs *regs)
{
set_flags(VEFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
else
clear_IF(regs);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,771 | static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
set_flags(VFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
else
clear_IF(regs);
}
| DoS | 0 | static inline void set_vflags_short(unsigned short flags, struct kernel_vm86_regs *regs)
{
set_flags(VFLAGS, flags, current->thread.v86mask);
set_flags(regs->pt.flags, flags, SAFE_MASK);
if (flags & X86_EFLAGS_IF)
set_IF(regs);
else
clear_IF(regs);
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,772 | int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
* return to 32 bit user space.
*/
struct task_struct *tsk;
int tmp, ret;
struct vm86plus_struct __user *v86;
tsk = current;
switch (cmd) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ:
ret = do_vm86_irq_handling(cmd, (int)arg);
goto out;
case VM86_PLUS_INSTALL_CHECK:
/*
* NOTE: on old vm86 stuff this will return the error
* from access_ok(), because the subfunction is
* interpreted as (invalid) address to vm86_struct.
* So the installation check works.
*/
ret = 0;
goto out;
}
/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
ret = -EPERM;
if (tsk->thread.saved_sp0)
goto out;
v86 = (struct vm86plus_struct __user *)arg;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, regs32) -
sizeof(info.regs));
ret = -EFAULT;
if (tmp)
goto out;
info.regs32 = regs;
info.vm86plus.is_vm86pus = 1;
tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
return ret;
}
| DoS | 0 | int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
{
struct kernel_vm86_struct info; /* declare this _on top_,
* this avoids wasting of stack space.
* This remains on the stack until we
* return to 32 bit user space.
*/
struct task_struct *tsk;
int tmp, ret;
struct vm86plus_struct __user *v86;
tsk = current;
switch (cmd) {
case VM86_REQUEST_IRQ:
case VM86_FREE_IRQ:
case VM86_GET_IRQ_BITS:
case VM86_GET_AND_RESET_IRQ:
ret = do_vm86_irq_handling(cmd, (int)arg);
goto out;
case VM86_PLUS_INSTALL_CHECK:
/*
* NOTE: on old vm86 stuff this will return the error
* from access_ok(), because the subfunction is
* interpreted as (invalid) address to vm86_struct.
* So the installation check works.
*/
ret = 0;
goto out;
}
/* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */
ret = -EPERM;
if (tsk->thread.saved_sp0)
goto out;
v86 = (struct vm86plus_struct __user *)arg;
tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
offsetof(struct kernel_vm86_struct, regs32) -
sizeof(info.regs));
ret = -EFAULT;
if (tmp)
goto out;
info.regs32 = regs;
info.vm86plus.is_vm86pus = 1;
tsk->thread.vm86_info = (struct vm86_struct __user *)v86;
do_sys_vm86(&info, tsk);
ret = 0; /* we never return here */
out:
return ret;
}
| @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
spinlock_t *ptl;
int i;
+ down_write(&mm->mmap_sem);
pgd = pgd_offset(mm, 0xA0000);
if (pgd_none_or_clear_bad(pgd))
goto out;
@@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
}
pte_unmap_unlock(pte, ptl);
out:
+ up_write(&mm->mmap_sem);
flush_tlb();
}
| CWE-264 | null | null |
16,773 | static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = pfn;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
| DoS | 0 | static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
pm->buffer[pm->pos++] = pfn;
if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
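add_to_pagemap() stores the entry first and only then reports PM_END_OF_BUFFER, so the slot that fills the buffer is kept and the caller flushes to userspace before continuing the walk. A small sketch of that append-then-signal shape; the names are stand-ins:

#include <stdint.h>
#include <stdio.h>

#define END_OF_BUFFER 1   /* stand-in for PM_END_OF_BUFFER */

struct pm {
    uint64_t buffer[4];
    int pos, len;
};

/*
 * Append, then report "full": the entry that fills the buffer is
 * still stored, and the caller drains before the next add.
 */
static int pm_add(struct pm *pm, uint64_t pfn)
{
    pm->buffer[pm->pos++] = pfn;
    if (pm->pos >= pm->len)
        return END_OF_BUFFER;
    return 0;
}

int main(void)
{
    struct pm pm = { .pos = 0, .len = 4 };
    for (uint64_t pfn = 100; pfn < 110; pfn++)
        if (pm_add(&pm, pfn)) {
            printf("flush %d entries at pfn %llu\n",
                pm.pos, (unsigned long long)pfn);
            pm.pos = 0;   /* caller would copy out, then reset */
        }
    return 0;
}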
16,774 | static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
int type;
int rv;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
rv = kstrtoint(strstrip(buffer), 10, &type);
if (rv < 0)
return rv;
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
.mm = mm,
};
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
}
put_task_struct(task);
return count;
}
| DoS | 0 | static ssize_t clear_refs_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task;
char buffer[PROC_NUMBUF];
struct mm_struct *mm;
struct vm_area_struct *vma;
int type;
int rv;
memset(buffer, 0, sizeof(buffer));
if (count > sizeof(buffer) - 1)
count = sizeof(buffer) - 1;
if (copy_from_user(buffer, buf, count))
return -EFAULT;
rv = kstrtoint(strstrip(buffer), 10, &type);
if (rv < 0)
return rv;
if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED)
return -EINVAL;
task = get_proc_task(file->f_path.dentry->d_inode);
if (!task)
return -ESRCH;
mm = get_task_mm(task);
if (mm) {
struct mm_walk clear_refs_walk = {
.pmd_entry = clear_refs_pte_range,
.mm = mm,
};
down_read(&mm->mmap_sem);
for (vma = mm->mmap; vma; vma = vma->vm_next) {
clear_refs_walk.private = vma;
if (is_vm_hugetlb_page(vma))
continue;
/*
* Writing 1 to /proc/pid/clear_refs affects all pages.
*
* Writing 2 to /proc/pid/clear_refs only affects
* Anonymous pages.
*
* Writing 3 to /proc/pid/clear_refs only affects file
* mapped pages.
*/
if (type == CLEAR_REFS_ANON && vma->vm_file)
continue;
if (type == CLEAR_REFS_MAPPED && !vma->vm_file)
continue;
walk_page_range(vma->vm_start, vma->vm_end,
&clear_refs_walk);
}
flush_tlb_mm(mm);
up_read(&mm->mmap_sem);
mmput(mm);
}
put_task_struct(task);
return count;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,775 | static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
struct proc_maps_private *priv;
int ret = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
priv->pid = proc_pid(inode);
ret = seq_open(file, ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = priv;
} else {
kfree(priv);
}
}
return ret;
}
| DoS | 0 | static int do_maps_open(struct inode *inode, struct file *file,
const struct seq_operations *ops)
{
struct proc_maps_private *priv;
int ret = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
priv->pid = proc_pid(inode);
ret = seq_open(file, ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = priv;
} else {
kfree(priv);
}
}
return ret;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
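The comment block in clear_refs_write() documents the accepted values: 1 clears referenced bits on all pages, 2 only on anonymous pages, 3 only on file-mapped pages. A minimal userspace sketch of driving that interface (error handling reduced to a single failure return):

#include <stdio.h>

/* Write one of the documented values (1 = all, 2 = anonymous only,
 * 3 = file-mapped only) to /proc/<pid>/clear_refs. */
int clear_refs(int pid, int type)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%d/clear_refs", pid);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%d\n", type);
        return fclose(f) ? -1 : 0;
}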
16,776 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
struct page *page;
if (pte_none(*pte))
return 0;
page = pte_page(*pte);
if (!page)
return 0;
md = walk->private;
gather_stats(page, md, pte_dirty(*pte), 1);
return 0;
}
| DoS | 0 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
struct numa_maps *md;
struct page *page;
if (pte_none(*pte))
return 0;
page = pte_page(*pte);
if (!page)
return 0;
md = walk->private;
gather_stats(page, md, pte_dirty(*pte), 1);
return 0;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,777 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
return 0;
}
| DoS | 0 | static int gather_hugetbl_stats(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end, struct mm_walk *walk)
{
return 0;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,778 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
int count = page_mapcount(page);
md->pages += nr_pages;
if (pte_dirty || PageDirty(page))
md->dirty += nr_pages;
if (PageSwapCache(page))
md->swapcache += nr_pages;
if (PageActive(page) || PageUnevictable(page))
md->active += nr_pages;
if (PageWriteback(page))
md->writeback += nr_pages;
if (PageAnon(page))
md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
md->node[page_to_nid(page)] += nr_pages;
}
| DoS | 0 | static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
int count = page_mapcount(page);
md->pages += nr_pages;
if (pte_dirty || PageDirty(page))
md->dirty += nr_pages;
if (PageSwapCache(page))
md->swapcache += nr_pages;
if (PageActive(page) || PageUnevictable(page))
md->active += nr_pages;
if (PageWriteback(page))
md->writeback += nr_pages;
if (PageAnon(page))
md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
md->node[page_to_nid(page)] += nr_pages;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,779 | static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
struct vm_area_struct *tail_vma = priv->tail_vma;
(*pos)++;
if (vma && (vma != tail_vma) && vma->vm_next)
return vma->vm_next;
vma_stop(priv, vma);
return (vma != tail_vma)? tail_vma: NULL;
}
| DoS | 0 | static void *m_next(struct seq_file *m, void *v, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
struct vm_area_struct *tail_vma = priv->tail_vma;
(*pos)++;
if (vma && (vma != tail_vma) && vma->vm_next)
return vma->vm_next;
vma_stop(priv, vma);
return (vma != tail_vma)? tail_vma: NULL;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,780 | static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
struct vm_area_struct *vma, *tail_vma = NULL;
loff_t l = *pos;
/* Clear the per syscall fields in priv */
priv->task = NULL;
priv->tail_vma = NULL;
/*
* We remember last_addr rather than next_addr to hit with
* mmap_cache most of the time. We have zero last_addr at
* the beginning and also after lseek. We will have -1 last_addr
* after the end of the vmas.
*/
if (last_addr == -1UL)
return NULL;
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
if (!mm || IS_ERR(mm))
return mm;
down_read(&mm->mmap_sem);
tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
/* Start with last addr hint */
vma = find_vma(mm, last_addr);
if (last_addr && vma) {
vma = vma->vm_next;
goto out;
}
/*
* Check the vma index is within the range and do
* sequential scan until m_index.
*/
vma = NULL;
if ((unsigned long)l < mm->map_count) {
vma = mm->mmap;
while (l-- && vma)
vma = vma->vm_next;
goto out;
}
if (l != mm->map_count)
tail_vma = NULL; /* After gate vma */
out:
if (vma)
return vma;
/* End of vmas has been reached */
m->version = (tail_vma != NULL)? 0: -1UL;
up_read(&mm->mmap_sem);
mmput(mm);
return tail_vma;
}
| DoS | 0 | static void *m_start(struct seq_file *m, loff_t *pos)
{
struct proc_maps_private *priv = m->private;
unsigned long last_addr = m->version;
struct mm_struct *mm;
struct vm_area_struct *vma, *tail_vma = NULL;
loff_t l = *pos;
/* Clear the per syscall fields in priv */
priv->task = NULL;
priv->tail_vma = NULL;
/*
* We remember last_addr rather than next_addr to hit with
* mmap_cache most of the time. We have zero last_addr at
* the beginning and also after lseek. We will have -1 last_addr
* after the end of the vmas.
*/
if (last_addr == -1UL)
return NULL;
priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
if (!priv->task)
return ERR_PTR(-ESRCH);
mm = mm_for_maps(priv->task);
if (!mm || IS_ERR(mm))
return mm;
down_read(&mm->mmap_sem);
tail_vma = get_gate_vma(priv->task->mm);
priv->tail_vma = tail_vma;
/* Start with last addr hint */
vma = find_vma(mm, last_addr);
if (last_addr && vma) {
vma = vma->vm_next;
goto out;
}
/*
* Check the vma index is within the range and do
* sequential scan until m_index.
*/
vma = NULL;
if ((unsigned long)l < mm->map_count) {
vma = mm->mmap;
while (l-- && vma)
vma = vma->vm_next;
goto out;
}
if (l != mm->map_count)
tail_vma = NULL; /* After gate vma */
out:
if (vma)
return vma;
/* End of vmas has been reached */
m->version = (tail_vma != NULL)? 0: -1UL;
up_read(&mm->mmap_sem);
mmput(mm);
return tail_vma;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
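m_start() combines two lookups: a fast resume from the address cached in m->version (served by find_vma()), and a fallback linear scan of *pos links when there is no usable hint. A self-contained sketch of that control flow over a plain singly linked list, with find_ge() standing in for find_vma() (assumed to return the first node whose start is >= the address); gate-vma tail handling is elided:

#include <stddef.h>

struct node { unsigned long start; struct node *next; };

struct node *resume(struct node *head, unsigned long last_addr, long pos,
                    struct node *(*find_ge)(struct node *, unsigned long))
{
        struct node *n;

        if (last_addr) {                        /* fast path: address hint */
                n = find_ge(head, last_addr);
                if (n)
                        return n->next;
        }
        for (n = head; n && pos-- > 0; n = n->next)     /* slow path: index */
                ;
        return n;                               /* NULL once past the end */
}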
16,781 | static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
if (!IS_ERR(vma))
vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
| DoS | 0 | static void m_stop(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
if (!IS_ERR(vma))
vma_stop(priv, vma);
if (priv->task)
put_task_struct(priv->task);
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,782 | static int maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
| DoS | 0 | static int maps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_maps_op);
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,783 | static int numa_maps_open(struct inode *inode, struct file *file)
{
struct numa_maps_private *priv;
int ret = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
priv->proc_maps.pid = proc_pid(inode);
ret = seq_open(file, &proc_pid_numa_maps_op);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = priv;
} else {
kfree(priv);
}
}
return ret;
}
| DoS | 0 | static int numa_maps_open(struct inode *inode, struct file *file)
{
struct numa_maps_private *priv;
int ret = -ENOMEM;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (priv) {
priv->proc_maps.pid = proc_pid(inode);
ret = seq_open(file, &proc_pid_numa_maps_op);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = priv;
} else {
kfree(priv);
}
}
return ret;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,784 | static void pad_len_spaces(struct seq_file *m, int len)
{
len = 25 + sizeof(void*) * 6 - len;
if (len < 1)
len = 1;
seq_printf(m, "%*c", len, ' ');
}
| DoS | 0 | static void pad_len_spaces(struct seq_file *m, int len)
{
len = 25 + sizeof(void*) * 6 - len;
if (len < 1)
len = 1;
seq_printf(m, "%*c", len, ' ');
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,785 | static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
int err = 0;
u64 pfn;
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
pfn = huge_pte_to_pagemap_entry(*pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
}
cond_resched();
return err;
}
| DoS | 0 | static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
int err = 0;
u64 pfn;
for (; addr != end; addr += PAGE_SIZE) {
int offset = (addr & ~hmask) >> PAGE_SHIFT;
pfn = huge_pte_to_pagemap_entry(*pte, offset);
err = add_to_pagemap(addr, pfn, pm);
if (err)
return err;
}
cond_resched();
return err;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,786 | static int pagemap_pte_hole(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr;
int err = 0;
for (addr = start; addr < end; addr += PAGE_SIZE) {
err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
if (err)
break;
}
return err;
}
| DoS | 0 | static int pagemap_pte_hole(unsigned long start, unsigned long end,
struct mm_walk *walk)
{
struct pagemapread *pm = walk->private;
unsigned long addr;
int err = 0;
for (addr = start; addr < end; addr += PAGE_SIZE) {
err = add_to_pagemap(addr, PM_NOT_PRESENT, pm);
if (err)
break;
}
return err;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
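pagemap_pte_hole() shows the file's layout implicitly: /proc/<pid>/pagemap is one 64-bit entry per virtual page, holes included, so userspace can seek straight to any address. A minimal sketch, assuming that fixed eight-bytes-per-page layout:

#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

/* One u64 entry per virtual page: seek to vpn * 8 and read it.
 * Holes read back as the PM_NOT_PRESENT encoding filled in above. */
int pagemap_entry(const char *path, uintptr_t vaddr, long page_size,
                  uint64_t *out)
{
        int fd = open(path, O_RDONLY);
        off_t off = (off_t)(vaddr / (uintptr_t)page_size) * sizeof(uint64_t);
        ssize_t n;

        if (fd < 0)
                return -1;
        n = pread(fd, out, sizeof(*out), off);
        close(fd);
        return n == (ssize_t)sizeof(*out) ? 0 : -1;
}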
16,787 | static int show_map(struct seq_file *m, void *v)
{
struct vm_area_struct *vma = v;
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
show_map_vma(m, vma);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
| DoS | 0 | static int show_map(struct seq_file *m, void *v)
{
struct vm_area_struct *vma = v;
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
show_map_vma(m, vma);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,788 | static int show_smap(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
.mm = vma->vm_mm,
.private = &mss,
};
memset(&mss, 0, sizeof mss);
mss.vma = vma;
/* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
show_map_vma(m, vma);
seq_printf(m,
"Size: %8lu kB\n"
"Rss: %8lu kB\n"
"Pss: %8lu kB\n"
"Shared_Clean: %8lu kB\n"
"Shared_Dirty: %8lu kB\n"
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
"Locked: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
mss.shared_dirty >> 10,
mss.private_clean >> 10,
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
| DoS | 0 | static int show_smap(struct seq_file *m, void *v)
{
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
struct vm_area_struct *vma = v;
struct mem_size_stats mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
.mm = vma->vm_mm,
.private = &mss,
};
memset(&mss, 0, sizeof mss);
mss.vma = vma;
/* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
show_map_vma(m, vma);
seq_printf(m,
"Size: %8lu kB\n"
"Rss: %8lu kB\n"
"Pss: %8lu kB\n"
"Shared_Clean: %8lu kB\n"
"Shared_Dirty: %8lu kB\n"
"Private_Clean: %8lu kB\n"
"Private_Dirty: %8lu kB\n"
"Referenced: %8lu kB\n"
"Anonymous: %8lu kB\n"
"AnonHugePages: %8lu kB\n"
"Swap: %8lu kB\n"
"KernelPageSize: %8lu kB\n"
"MMUPageSize: %8lu kB\n"
"Locked: %8lu kB\n",
(vma->vm_end - vma->vm_start) >> 10,
mss.resident >> 10,
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
mss.shared_clean >> 10,
mss.shared_dirty >> 10,
mss.private_clean >> 10,
mss.private_dirty >> 10,
mss.referenced >> 10,
mss.anonymous >> 10,
mss.anonymous_thp >> 10,
mss.swap >> 10,
vma_kernel_pagesize(vma) >> 10,
vma_mmu_pagesize(vma) >> 10,
(vma->vm_flags & VM_LOCKED) ?
(unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
if (m->count < m->size) /* vma is copied successfully */
m->version = (vma != get_gate_vma(task->mm))
? vma->vm_start : 0;
return 0;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
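The >> (10 + PSS_SHIFT) in show_smap() is the tail of a fixed-point scheme: Pss is accumulated with PSS_SHIFT fractional bits so a page mapped N times can contribute size/N without each page rounding to zero. A runnable sketch of that arithmetic, assuming 4 KiB pages and PSS_SHIFT == 12 as in fs/proc/task_mmu.c:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PSS_SHIFT 12            /* fractional bits of the fixed point */

/* Three pages shared 1, 2 and 4 ways: each adds size/mapcount in
 * fixed point, and the final shift by (10 + PSS_SHIFT) lands in kB,
 * matching the seq_printf() above. Prints "Pss: 7 kB". */
int main(void)
{
        uint64_t pss = 0;
        int mapcount[] = { 1, 2, 4 };

        for (int i = 0; i < 3; i++)
                pss += (PAGE_SIZE << PSS_SHIFT) / mapcount[i];
        printf("Pss: %llu kB\n",
               (unsigned long long)(pss >> (10 + PSS_SHIFT)));
        return 0;
}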
16,789 | static int smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
| DoS | 0 | static int smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,790 | static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
swp_entry_t e = pte_to_swp_entry(pte);
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
| DoS | 0 | static u64 swap_pte_to_pagemap_entry(pte_t pte)
{
swp_entry_t e = pte_to_swp_entry(pte);
return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
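swap_pte_to_pagemap_entry() packs the swap type into the low MAX_SWAPFILES_SHIFT bits and the offset above them. The inverse, as a sketch (MAX_SWAPFILES_SHIFT assumed to be the kernel's value of 5):

#include <stdint.h>

#define MAX_SWAPFILES_SHIFT 5   /* assumed kernel value */

/* Low bits carry the swap type; the remainder is the offset
 * within that swap device. */
static void decode_swap_entry(uint64_t e, unsigned *type, uint64_t *offset)
{
        *type = (unsigned)(e & ((1u << MAX_SWAPFILES_SHIFT) - 1));
        *offset = e >> MAX_SWAPFILES_SHIFT;
}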
16,791 | void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long data, text, lib, swap;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = get_mm_rss(mm);
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
"VmPin:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
swap << (PAGE_SHIFT-10));
}
| DoS | 0 | void task_mem(struct seq_file *m, struct mm_struct *mm)
{
unsigned long data, text, lib, swap;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
* collector of these hiwater stats must therefore get total_vm
* and rss too, which will usually be the higher. Barriers? not
* worth the effort, such snapshots can always be inconsistent.
*/
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
hiwater_rss = total_rss = get_mm_rss(mm);
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
data = mm->total_vm - mm->shared_vm - mm->stack_vm;
text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK)) >> 10;
lib = (mm->exec_vm << (PAGE_SHIFT-10)) - text;
swap = get_mm_counter(mm, MM_SWAPENTS);
seq_printf(m,
"VmPeak:\t%8lu kB\n"
"VmSize:\t%8lu kB\n"
"VmLck:\t%8lu kB\n"
"VmPin:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
"VmLib:\t%8lu kB\n"
"VmPTE:\t%8lu kB\n"
"VmSwap:\t%8lu kB\n",
hiwater_vm << (PAGE_SHIFT-10),
(total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
mm->locked_vm << (PAGE_SHIFT-10),
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
(PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
swap << (PAGE_SHIFT-10));
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
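Every kB figure in task_mem() comes from the same << (PAGE_SHIFT - 10) idiom: a page count times the page size, expressed in kB, reduces to a left shift because both are powers of two. A runnable check, deriving the shift from sysconf() instead of the kernel macro:

#include <stdio.h>
#include <unistd.h>

/* With 4 KiB pages, PAGE_SHIFT - 10 == 2, so 300 pages print as
 * 1200 kB. */
int main(void)
{
        long shift = 0, sz = sysconf(_SC_PAGESIZE);
        unsigned long pages = 300;

        while ((1L << shift) < sz)
                shift++;
        printf("%lu pages = %lu kB\n", pages, pages << (shift - 10));
        return 0;
}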
16,792 | unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
| DoS | 0 | unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
*shared = get_mm_counter(mm, MM_FILEPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
*resident = *shared + get_mm_counter(mm, MM_ANONPAGES);
return mm->total_vm;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,793 | unsigned long task_vsize(struct mm_struct *mm)
{
return PAGE_SIZE * mm->total_vm;
}
| DoS | 0 | unsigned long task_vsize(struct mm_struct *mm)
{
return PAGE_SIZE * mm->total_vm;
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,794 | static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma && vma != priv->tail_vma) {
struct mm_struct *mm = vma->vm_mm;
up_read(&mm->mmap_sem);
mmput(mm);
}
}
| DoS | 0 | static void vma_stop(struct proc_maps_private *priv, struct vm_area_struct *vma)
{
if (vma && vma != priv->tail_vma) {
struct mm_struct *mm = vma->vm_mm;
up_read(&mm->mmap_sem);
mmput(mm);
}
}
| @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
} else {
spin_unlock(&walk->mm->page_table_lock);
}
+
+ if (pmd_trans_unstable(pmd))
+ return 0;
/*
* The mmap_sem held all the way back in m_start() is what
* keeps khugepaged out of here and from collapsing things
@@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct page *page;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
int err = 0;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
/* find the first VMA at or above 'addr' */
vma = find_vma(walk->mm, addr);
@@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(&walk->mm->page_table_lock);
}
+ if (pmd_trans_unstable(pmd))
+ return 0;
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, md->vma, addr); | CWE-264 | null | null |
16,795 | static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
unsigned int nr_pages)
{
if (!mem_cgroup_is_root(memcg)) {
unsigned long bytes = nr_pages * PAGE_SIZE;
res_counter_uncharge(&memcg->res, bytes);
if (do_swap_account)
res_counter_uncharge(&memcg->memsw, bytes);
}
}
| DoS | 0 | static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
unsigned int nr_pages)
{
if (!mem_cgroup_is_root(memcg)) {
unsigned long bytes = nr_pages * PAGE_SIZE;
res_counter_uncharge(&memcg->res, bytes);
if (do_swap_account)
res_counter_uncharge(&memcg->memsw, bytes);
}
}
| @@ -5234,6 +5234,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5396,6 +5398,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
retry:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) { | CWE-264 | null | null |
16,796 | static void __mem_cgroup_clear_mc(void)
{
struct mem_cgroup *from = mc.from;
struct mem_cgroup *to = mc.to;
/* we must uncharge all the leftover precharges from mc.to */
if (mc.precharge) {
__mem_cgroup_cancel_charge(mc.to, mc.precharge);
mc.precharge = 0;
}
/*
* we didn't uncharge from mc.from at mem_cgroup_move_account(), so
* we must uncharge here.
*/
if (mc.moved_charge) {
__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
mc.moved_charge = 0;
}
/* we must fixup refcnts and charges */
if (mc.moved_swap) {
/* uncharge swap account from the old cgroup */
if (!mem_cgroup_is_root(mc.from))
res_counter_uncharge(&mc.from->memsw,
PAGE_SIZE * mc.moved_swap);
__mem_cgroup_put(mc.from, mc.moved_swap);
if (!mem_cgroup_is_root(mc.to)) {
/*
* we charged both to->res and to->memsw, so we should
* uncharge to->res.
*/
res_counter_uncharge(&mc.to->res,
PAGE_SIZE * mc.moved_swap);
}
/* we've already done mem_cgroup_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
memcg_oom_recover(to);
wake_up_all(&mc.waitq);
}
| DoS | 0 | static void __mem_cgroup_clear_mc(void)
{
struct mem_cgroup *from = mc.from;
struct mem_cgroup *to = mc.to;
/* we must uncharge all the leftover precharges from mc.to */
if (mc.precharge) {
__mem_cgroup_cancel_charge(mc.to, mc.precharge);
mc.precharge = 0;
}
/*
* we didn't uncharge from mc.from at mem_cgroup_move_account(), so
* we must uncharge here.
*/
if (mc.moved_charge) {
__mem_cgroup_cancel_charge(mc.from, mc.moved_charge);
mc.moved_charge = 0;
}
/* we must fixup refcnts and charges */
if (mc.moved_swap) {
/* uncharge swap account from the old cgroup */
if (!mem_cgroup_is_root(mc.from))
res_counter_uncharge(&mc.from->memsw,
PAGE_SIZE * mc.moved_swap);
__mem_cgroup_put(mc.from, mc.moved_swap);
if (!mem_cgroup_is_root(mc.to)) {
/*
* we charged both to->res and to->memsw, so we should
* uncharge to->res.
*/
res_counter_uncharge(&mc.to->res,
PAGE_SIZE * mc.moved_swap);
}
/* we've already done mem_cgroup_get(mc.to) */
mc.moved_swap = 0;
}
memcg_oom_recover(from);
memcg_oom_recover(to);
wake_up_all(&mc.waitq);
}
| @@ -5234,6 +5234,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5396,6 +5398,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
retry:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) { | CWE-264 | null | null |
16,797 | static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
enum charge_type ctype,
bool lrucare)
{
struct zone *uninitialized_var(zone);
bool was_on_lru = false;
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
__mem_cgroup_cancel_charge(memcg, nr_pages);
return;
}
/*
* we don't need page_cgroup_lock about tail pages, because they are not
* accessed by any other context at this point.
*/
/*
* In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
* may already be on some other mem_cgroup's LRU. Take care of it.
*/
if (lrucare) {
zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page)) {
ClearPageLRU(page);
del_page_from_lru_list(zone, page, page_lru(page));
was_on_lru = true;
}
}
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
* Especially when a page_cgroup is taken from a page, pc->mem_cgroup
* is accessed after testing USED bit. To make pc->mem_cgroup visible
* before USED bit, we need memory barrier here.
* See mem_cgroup_add_lru_list(), etc.
*/
smp_wmb();
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_CACHE:
case MEM_CGROUP_CHARGE_TYPE_SHMEM:
SetPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
ClearPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
default:
break;
}
if (lrucare) {
if (was_on_lru) {
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
add_page_to_lru_list(zone, page, page_lru(page));
}
spin_unlock_irq(&zone->lru_lock);
}
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
/*
* "charge_statistics" updated event counter. Then, check it.
	 * Insert the ancestor (and the ancestor's ancestors) into the
	 * soft-limit RB-tree if they exceed the soft limit.
*/
memcg_check_events(memcg, page);
}
| DoS | 0 | static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
enum charge_type ctype,
bool lrucare)
{
struct zone *uninitialized_var(zone);
bool was_on_lru = false;
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
__mem_cgroup_cancel_charge(memcg, nr_pages);
return;
}
/*
* we don't need page_cgroup_lock about tail pages, because they are not
* accessed by any other context at this point.
*/
/*
* In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
* may already be on some other mem_cgroup's LRU. Take care of it.
*/
if (lrucare) {
zone = page_zone(page);
spin_lock_irq(&zone->lru_lock);
if (PageLRU(page)) {
ClearPageLRU(page);
del_page_from_lru_list(zone, page, page_lru(page));
was_on_lru = true;
}
}
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
* Especially when a page_cgroup is taken from a page, pc->mem_cgroup
* is accessed after testing USED bit. To make pc->mem_cgroup visible
* before USED bit, we need memory barrier here.
* See mem_cgroup_add_lru_list(), etc.
*/
smp_wmb();
switch (ctype) {
case MEM_CGROUP_CHARGE_TYPE_CACHE:
case MEM_CGROUP_CHARGE_TYPE_SHMEM:
SetPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
case MEM_CGROUP_CHARGE_TYPE_MAPPED:
ClearPageCgroupCache(pc);
SetPageCgroupUsed(pc);
break;
default:
break;
}
if (lrucare) {
if (was_on_lru) {
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
add_page_to_lru_list(zone, page, page_lru(page));
}
spin_unlock_irq(&zone->lru_lock);
}
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
/*
* "charge_statistics" updated event counter. Then, check it.
	 * Insert the ancestor (and the ancestor's ancestors) into the
	 * soft-limit RB-tree if they exceed the soft limit.
*/
memcg_check_events(memcg, page);
}
| @@ -5234,6 +5234,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5396,6 +5398,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
retry:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) { | CWE-264 | null | null |
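The smp_wmb() comment in __mem_cgroup_commit_charge() describes a publish pattern: the payload (pc->mem_cgroup) must be visible before the USED bit that lockless readers test. A userspace analogue using C11 release/acquire atomics in place of the kernel barrier (struct and names illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct pc { void *mem_cgroup; atomic_bool used; };

/* Writer: payload first, then a release store of the flag -- the
 * ordering smp_wmb() enforces before SetPageCgroupUsed(). */
static void publish(struct pc *pc, void *memcg)
{
        pc->mem_cgroup = memcg;
        atomic_store_explicit(&pc->used, 1, memory_order_release);
}

/* Lockless reader: the acquire load pairs with the release store,
 * so a set flag guarantees the payload is visible. */
static void *lookup(struct pc *pc)
{
        if (!atomic_load_explicit(&pc->used, memory_order_acquire))
                return NULL;
        return pc->mem_cgroup;
}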
16,798 | static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
kfree_rcu(memcg, rcu_freeing);
else
call_rcu(&memcg->rcu_freeing, vfree_rcu);
}
| DoS | 0 | static void __mem_cgroup_free(struct mem_cgroup *memcg)
{
int node;
mem_cgroup_remove_from_trees(memcg);
free_css_id(&mem_cgroup_subsys, &memcg->css);
for_each_node(node)
free_mem_cgroup_per_zone_info(memcg, node);
free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
kfree_rcu(memcg, rcu_freeing);
else
call_rcu(&memcg->rcu_freeing, vfree_rcu);
}
| @@ -5234,6 +5234,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5396,6 +5398,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
retry:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) { | CWE-264 | null | null |
16,799 | __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz,
unsigned long long new_usage_in_excess)
{
struct rb_node **p = &mctz->rb_root.rb_node;
struct rb_node *parent = NULL;
struct mem_cgroup_per_zone *mz_node;
if (mz->on_tree)
return;
mz->usage_in_excess = new_usage_in_excess;
if (!mz->usage_in_excess)
return;
while (*p) {
parent = *p;
mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
tree_node);
if (mz->usage_in_excess < mz_node->usage_in_excess)
p = &(*p)->rb_left;
/*
* We can't avoid mem cgroups that are over their soft
* limit by the same amount
*/
else if (mz->usage_in_excess >= mz_node->usage_in_excess)
p = &(*p)->rb_right;
}
rb_link_node(&mz->tree_node, parent, p);
rb_insert_color(&mz->tree_node, &mctz->rb_root);
mz->on_tree = true;
}
| DoS | 0 | __mem_cgroup_insert_exceeded(struct mem_cgroup *memcg,
struct mem_cgroup_per_zone *mz,
struct mem_cgroup_tree_per_zone *mctz,
unsigned long long new_usage_in_excess)
{
struct rb_node **p = &mctz->rb_root.rb_node;
struct rb_node *parent = NULL;
struct mem_cgroup_per_zone *mz_node;
if (mz->on_tree)
return;
mz->usage_in_excess = new_usage_in_excess;
if (!mz->usage_in_excess)
return;
while (*p) {
parent = *p;
mz_node = rb_entry(parent, struct mem_cgroup_per_zone,
tree_node);
if (mz->usage_in_excess < mz_node->usage_in_excess)
p = &(*p)->rb_left;
/*
* We can't avoid mem cgroups that are over their soft
* limit by the same amount
*/
else if (mz->usage_in_excess >= mz_node->usage_in_excess)
p = &(*p)->rb_right;
}
rb_link_node(&mz->tree_node, parent, p);
rb_insert_color(&mz->tree_node, &mctz->rb_root);
mz->on_tree = true;
}
| @@ -5234,6 +5234,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE)
@@ -5396,6 +5398,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
spinlock_t *ptl;
split_huge_page_pmd(walk->mm, pmd);
+ if (pmd_trans_unstable(pmd))
+ return 0;
retry:
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; addr += PAGE_SIZE) { | CWE-264 | null | null |
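__mem_cgroup_insert_exceeded() resolves ties by descending right, as its comment explains: cgroups over their soft limit by the same amount must all stay on the tree. The descent rule in isolation, as a plain (unbalanced) BST sketch; the kernel version additionally rebalances with rb_insert_color():

#include <stddef.h>

struct n { unsigned long long excess; struct n *left, *right; };

/* Strictly smaller keys go left; ties go right, so equal-excess
 * nodes remain insertable rather than being dropped. */
static void insert_excess(struct n **root, struct n *nz)
{
        while (*root) {
                if (nz->excess < (*root)->excess)
                        root = &(*root)->left;
                else
                        root = &(*root)->right;
        }
        nz->left = nz->right = NULL;
        *root = nz;
}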