idx: int64
func_before: string
Vulnerability Classification: string
vul: int64
func_after: string
patch: string
CWE ID: string
lines_before: string
lines_after: string
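Each record below lists these nine fields in order; for the non-vulnerable samples in this section (vul = 0), func_before and func_after are identical and lines_before/lines_after are null. As a minimal sketch of how such a dump could be loaded and filtered, assuming the rows were exported to a CSV file with one column per field (the filename big_vul_sample.csv here is hypothetical):

import pandas as pd

# Hypothetical export of the records shown below, one column per schema field.
df = pd.read_csv("big_vul_sample.csv")

# Select the CWE-20 rows classified "DoS +Priv", as in this section.
subset = df[(df["CWE ID"] == "CWE-20") &
            (df["Vulnerability Classification"] == "DoS +Priv")]

# For non-vulnerable records (vul == 0), the pre- and post-patch
# functions should be byte-identical.
unchanged = subset[subset["vul"] == 0]
assert (unchanged["func_before"] == unchanged["func_after"]).all()

print(len(unchanged), "non-vulnerable CWE-20 records")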
23100
void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif }
DoS +Priv
0
void kvm_track_tsc_matching(struct kvm_vcpu *vcpu) { #ifdef CONFIG_X86_64 bool vcpus_matched; bool do_request = false; struct kvm_arch *ka = &vcpu->kvm->arch; struct pvclock_gtod_data *gtod = &pvclock_gtod_data; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&vcpu->kvm->online_vcpus)); if (vcpus_matched && gtod->clock.vclock_mode == VCLOCK_TSC) if (!ka->use_master_clock) do_request = 1; if (!vcpus_matched && ka->use_master_clock) do_request = 1; if (do_request) kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu); trace_kvm_track_tsc(vcpu->vcpu_id, ka->nr_vcpus_matched_tsc, atomic_read(&vcpu->kvm->online_vcpus), ka->use_master_clock, gtod->clock.vclock_mode); #endif }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23101
static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK); }
DoS +Priv
0
static void kvm_update_dr7(struct kvm_vcpu *vcpu) { unsigned long dr7; if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) dr7 = vcpu->arch.guest_debug_dr7; else dr7 = vcpu->arch.dr7; kvm_x86_ops->set_dr7(vcpu, dr7); vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23102
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) return false; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return false; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return false; } return true; }
DoS +Priv
0
bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) return false; if (efer & EFER_FFXSR) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) return false; } if (efer & EFER_SVME) { struct kvm_cpuid_entry2 *feat; feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) return false; } return true; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23103
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; unsigned long eip = vcpu->arch.emulate_ctxt.eip; u32 dr6 = 0; if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.guest_debug_dr7, vcpu->arch.eff_db); if (dr6 != 0) { kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; return true; } } if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.dr7, vcpu->arch.db); if (dr6 != 0) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6; kvm_queue_exception(vcpu, DB_VECTOR); *r = EMULATE_DONE; return true; } } return false; }
DoS +Priv
0
static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; unsigned long eip = vcpu->arch.emulate_ctxt.eip; u32 dr6 = 0; if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) && (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.guest_debug_dr7, vcpu->arch.eff_db); if (dr6 != 0) { kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + get_segment_base(vcpu, VCPU_SREG_CS); kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; return true; } } if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { dr6 = kvm_vcpu_check_hw_bp(eip, 0, vcpu->arch.dr7, vcpu->arch.db); if (dr6 != 0) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6; kvm_queue_exception(vcpu, DB_VECTOR); *r = EMULATE_DONE; return true; } } return false; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23104
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, unsigned long *db) { u32 dr6 = 0; int i; u32 enable, rwlen; enable = dr7; rwlen = dr7 >> 16; for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) dr6 |= (1 << i); return dr6; }
DoS +Priv
0
static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, unsigned long *db) { u32 dr6 = 0; int i; u32 enable, rwlen; enable = dr7; rwlen = dr7 >> 16; for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4) if ((enable & 3) && (rwlen & 15) == type && db[i] == addr) dr6 |= (1 << i); return dr6; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23105
static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; /* * Use the "raw" value to see if TF was passed to the processor. * Note that the new value of the flags has not been saved yet. * * This is correct even for TF set by the guest, because "the * processor will not generate this exception after the instruction * that sets the TF flag". */ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); if (unlikely(rflags & X86_EFLAGS_TF)) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; /* * "Certain debug exceptions may clear bit 0-3. The * remaining contents of the DR6 register are never * cleared by the processor". */ vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BS; kvm_queue_exception(vcpu, DB_VECTOR); } } }
DoS +Priv
0
static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) { struct kvm_run *kvm_run = vcpu->run; /* * Use the "raw" value to see if TF was passed to the processor. * Note that the new value of the flags has not been saved yet. * * This is correct even for TF set by the guest, because "the * processor will not generate this exception after the instruction * that sets the TF flag". */ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); if (unlikely(rflags & X86_EFLAGS_TF)) { if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; kvm_run->debug.arch.exception = DB_VECTOR; kvm_run->exit_reason = KVM_EXIT_DEBUG; *r = EMULATE_USER_EXIT; } else { vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF; /* * "Certain debug exceptions may clear bit 0-3. The * remaining contents of the DR6 register are never * cleared by the processor". */ vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= DR6_BS; kvm_queue_exception(vcpu, DB_VECTOR); } } }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23106
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); }
DoS +Priv
0
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return irqchip_in_kernel(vcpu->kvm) == (vcpu->arch.apic != NULL); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23107
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs.selector = vector << 8; cs.base = vector << 12; kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); kvm_rip_write(vcpu, 0); }
DoS +Priv
0
void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector) { struct kvm_segment cs; kvm_get_segment(vcpu, &cs, VCPU_SREG_CS); cs.selector = vector << 8; cs.base = vector << 12; kvm_set_segment(vcpu, &cs, VCPU_SREG_CS); kvm_rip_write(vcpu, 0); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23108
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; }
DoS +Priv
0
static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_x86_ops->sync_pir_to_irr(vcpu); memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23109
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; }
DoS +Priv
0
static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s) { kvm_apic_post_state_restore(vcpu, s); update_cr8_intercept(vcpu); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23110
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); }
DoS +Priv
0
static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, struct kvm_vcpu_events *events) { process_nmi(vcpu); events->exception.injected = vcpu->arch.exception.pending && !kvm_exception_is_soft(vcpu->arch.exception.nr); events->exception.nr = vcpu->arch.exception.nr; events->exception.has_error_code = vcpu->arch.exception.has_error_code; events->exception.pad = 0; events->exception.error_code = vcpu->arch.exception.error_code; events->interrupt.injected = vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; events->interrupt.nr = vcpu->arch.interrupt.nr; events->interrupt.soft = 0; events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); events->nmi.injected = vcpu->arch.nmi_injected; events->nmi.pending = vcpu->arch.nmi_pending != 0; events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu); events->nmi.pad = 0; events->sipi_vector = 0; /* never valid when reporting to user space */ events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING | KVM_VCPUEVENT_VALID_SHADOW); memset(&events->reserved, 0, sizeof(events->reserved)); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23111
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[i].value); break; } if (r) r = -EINVAL; return r; }
DoS +Priv
0
static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu, struct kvm_xcrs *guest_xcrs) { int i, r = 0; if (!cpu_has_xsave) return -EINVAL; if (guest_xcrs->nr_xcrs > KVM_MAX_XCRS || guest_xcrs->flags) return -EINVAL; for (i = 0; i < guest_xcrs->nr_xcrs; i++) /* Only support XCR0 currently */ if (guest_xcrs->xcrs[i].xcr == XCR_XFEATURE_ENABLED_MASK) { r = __kvm_set_xcr(vcpu, XCR_XFEATURE_ENABLED_MASK, guest_xcrs->xcrs[i].value); break; } if (r) r = -EINVAL; return r; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23112
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) { /* * Here we allow setting states that are not present in * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * with old userspace. */ if (xstate_bv & ~KVM_SUPPORTED_XCR0) return -EINVAL; if (xstate_bv & ~host_xcr0) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, vcpu->arch.guest_xstate_size); } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; }
DoS +Priv
0
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, struct kvm_xsave *guest_xsave) { u64 xstate_bv = *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)]; if (cpu_has_xsave) { /* * Here we allow setting states that are not present in * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * with old userspace. */ if (xstate_bv & ~KVM_SUPPORTED_XCR0) return -EINVAL; if (xstate_bv & ~host_xcr0) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->xsave, guest_xsave->region, vcpu->arch.guest_xstate_size); } else { if (xstate_bv & ~XSTATE_FPSSE) return -EINVAL; memcpy(&vcpu->arch.guest_fpu.state->fxsave, guest_xsave->region, sizeof(struct i387_fxsave_struct)); } return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23113
void kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; kvm_x86_ops->vcpu_reset(vcpu); }
DoS +Priv
0
void kvm_vcpu_reset(struct kvm_vcpu *vcpu) { atomic_set(&vcpu->arch.nmi_queued, 0); vcpu->arch.nmi_pending = 0; vcpu->arch.nmi_injected = false; memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); vcpu->arch.dr6 = DR6_FIXED_1; vcpu->arch.dr7 = DR7_FIXED_1; kvm_update_dr7(vcpu); kvm_make_request(KVM_REQ_EVENT, vcpu); vcpu->arch.apf.msr_val = 0; vcpu->arch.st.msr_val = 0; kvmclock_reset(vcpu); kvm_clear_async_pf_completion_queue(vcpu); kvm_async_pf_hash_reset(vcpu); vcpu->arch.apf.halted = false; kvm_pmu_reset(vcpu); memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs)); vcpu->arch.regs_avail = ~0; vcpu->arch.regs_dirty = ~0; kvm_x86_ops->vcpu_reset(vcpu); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23114
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; struct kvm_memory_slot *memslot; unsigned long n, i; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; bool is_dirty = false; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); dirty_bitmap = memslot->dirty_bitmap; r = -ENOENT; if (!dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); memset(dirty_bitmap_buffer, 0, n); spin_lock(&kvm->mmu_lock); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; if (!dirty_bitmap[i]) continue; is_dirty = true; mask = xchg(&dirty_bitmap[i], 0); dirty_bitmap_buffer[i] = mask; offset = i * BITS_PER_LONG; kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); } if (is_dirty) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; }
DoS +Priv
0
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { int r; struct kvm_memory_slot *memslot; unsigned long n, i; unsigned long *dirty_bitmap; unsigned long *dirty_bitmap_buffer; bool is_dirty = false; mutex_lock(&kvm->slots_lock); r = -EINVAL; if (log->slot >= KVM_USER_MEM_SLOTS) goto out; memslot = id_to_memslot(kvm->memslots, log->slot); dirty_bitmap = memslot->dirty_bitmap; r = -ENOENT; if (!dirty_bitmap) goto out; n = kvm_dirty_bitmap_bytes(memslot); dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long); memset(dirty_bitmap_buffer, 0, n); spin_lock(&kvm->mmu_lock); for (i = 0; i < n / sizeof(long); i++) { unsigned long mask; gfn_t offset; if (!dirty_bitmap[i]) continue; is_dirty = true; mask = xchg(&dirty_bitmap[i], 0); dirty_bitmap_buffer[i] = mask; offset = i * BITS_PER_LONG; kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask); } if (is_dirty) kvm_flush_remote_tlbs(kvm); spin_unlock(&kvm->mmu_lock); r = -EFAULT; if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n)) goto out; r = 0; out: mutex_unlock(&kvm->slots_lock); return r; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23115
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { if (!irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level, line_status); return 0; }
DoS +Priv
0
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event, bool line_status) { if (!irqchip_in_kernel(kvm)) return -ENXIO; irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irq_event->irq, irq_event->level, line_status); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23116
static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; }
DoS +Priv
0
static int kvm_vm_ioctl_reinject(struct kvm *kvm, struct kvm_reinject_control *control) { if (!kvm->arch.vpit) return -ENXIO; mutex_lock(&kvm->arch.vpit->pit_state.lock); kvm->arch.vpit->pit_state.reinject = control->pit_reinject; mutex_unlock(&kvm->arch.vpit->pit_state.lock); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23117
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; }
DoS +Priv
0
static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm, u32 kvm_nr_mmu_pages) { if (kvm_nr_mmu_pages < KVM_MIN_ALLOC_MMU_PAGES) return -EINVAL; mutex_lock(&kvm->slots_lock); kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages); kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages; mutex_unlock(&kvm->slots_lock); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23118
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. */ if (usdiff < USEC_PER_SEC && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); data += delta; offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } matched = true; } else { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. */ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = data; kvm->arch.cur_tsc_offset = offset; matched = false; pr_debug("kvm: new tsc generation %u, clock %llu\n", kvm->arch.cur_tsc_generation, data); } /* * We also track th most recent recorded KHZ, write and time to * allow the matching interval to be extended at each write. */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_guest_tsc = data; /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) update_ia32_tsc_adjust_msr(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); spin_lock(&kvm->arch.pvclock_gtod_sync_lock); if (matched) kvm->arch.nr_vcpus_matched_tsc++; else kvm->arch.nr_vcpus_matched_tsc = 0; kvm_track_tsc_matching(vcpu); spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); }
DoS +Priv
0
void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) { struct kvm *kvm = vcpu->kvm; u64 offset, ns, elapsed; unsigned long flags; s64 usdiff; bool matched; u64 data = msr->data; raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); ns = get_kernel_ns(); elapsed = ns - kvm->arch.last_tsc_nsec; if (vcpu->arch.virtual_tsc_khz) { int faulted = 0; /* n.b - signed multiplication and division required */ usdiff = data - kvm->arch.last_tsc_write; #ifdef CONFIG_X86_64 usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz; #else /* do_div() only does unsigned */ asm("1: idivl %[divisor]\n" "2: xor %%edx, %%edx\n" " movl $0, %[faulted]\n" "3:\n" ".section .fixup,\"ax\"\n" "4: movl $1, %[faulted]\n" " jmp 3b\n" ".previous\n" _ASM_EXTABLE(1b, 4b) : "=A"(usdiff), [faulted] "=r" (faulted) : "A"(usdiff * 1000), [divisor] "rm"(vcpu->arch.virtual_tsc_khz)); #endif do_div(elapsed, 1000); usdiff -= elapsed; if (usdiff < 0) usdiff = -usdiff; /* idivl overflow => difference is larger than USEC_PER_SEC */ if (faulted) usdiff = USEC_PER_SEC; } else usdiff = USEC_PER_SEC; /* disable TSC match window below */ /* * Special case: TSC write with a small delta (1 second) of virtual * cycle time against real time is interpreted as an attempt to * synchronize the CPU. * * For a reliable TSC, we can match TSC offsets, and for an unstable * TSC, we add elapsed time in this computation. We could let the * compensation code attempt to catch up if we fall behind, but * it's better to try to match offsets from the beginning. */ if (usdiff < USEC_PER_SEC && vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) { if (!check_tsc_unstable()) { offset = kvm->arch.cur_tsc_offset; pr_debug("kvm: matched tsc offset for %llu\n", data); } else { u64 delta = nsec_to_cycles(vcpu, elapsed); data += delta; offset = kvm_x86_ops->compute_tsc_offset(vcpu, data); pr_debug("kvm: adjusted tsc offset by %llu\n", delta); } matched = true; } else { /* * We split periods of matched TSC writes into generations. * For each generation, we track the original measured * nanosecond time, offset, and write, so if TSCs are in * sync, we can match exact offset, and if not, we can match * exact software computation in compute_guest_tsc() * * These values are tracked in kvm->arch.cur_xxx variables. */ kvm->arch.cur_tsc_generation++; kvm->arch.cur_tsc_nsec = ns; kvm->arch.cur_tsc_write = data; kvm->arch.cur_tsc_offset = offset; matched = false; pr_debug("kvm: new tsc generation %u, clock %llu\n", kvm->arch.cur_tsc_generation, data); } /* * We also track th most recent recorded KHZ, write and time to * allow the matching interval to be extended at each write. */ kvm->arch.last_tsc_nsec = ns; kvm->arch.last_tsc_write = data; kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz; /* Reset of TSC must disable overshoot protection below */ vcpu->arch.hv_clock.tsc_timestamp = 0; vcpu->arch.last_guest_tsc = data; /* Keep track of which generation this VCPU has synchronized to */ vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation; vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec; vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write; if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated) update_ia32_tsc_adjust_msr(vcpu, offset); kvm_x86_ops->write_tsc_offset(vcpu, offset); raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); spin_lock(&kvm->arch.pvclock_gtod_sync_lock); if (matched) kvm->arch.nr_vcpus_matched_tsc++; else kvm->arch.nr_vcpus_matched_tsc = 0; kvm_track_tsc_matching(vcpu); spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23119
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); }
DoS +Priv
0
static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) { int version; int r; struct pvclock_wall_clock wc; struct timespec boot; if (!wall_clock) return; r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); if (r) return; if (version & 1) ++version; /* first time write, random junk */ ++version; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); /* * The guest calculates current wall clock time by adding * system time (updated by kvm_guest_time_update below) to the * wall clock specified here. Guest system time equals host * system time for us, thus we must fill in host boot time here. */ getboottime(&boot); if (kvm->arch.kvmclock_offset) { struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); boot = timespec_sub(boot, ts); } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc)); version++; kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
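The wall-clock write above follows the pvclock version protocol: bump the version to an odd value before touching the payload, write the payload, then bump it back to even, so a guest reader can detect a torn read and retry. A minimal userspace sketch of both sides, assuming a single writer (wc_page, wc_write and wc_read are illustrative names; a real implementation also needs memory barriers around the stores):

#include <stdint.h>
#include <stdio.h>

/* Toy shared wall-clock page mirroring the guest-visible protocol. */
struct wc_page {
    volatile uint32_t version;
    uint32_t sec;
    uint32_t nsec;
};

static void wc_write(struct wc_page *wc, uint32_t sec, uint32_t nsec)
{
    if (wc->version & 1)   /* first-time write: recover from random junk */
        wc->version++;
    wc->version++;         /* odd: update in progress */
    wc->sec = sec;
    wc->nsec = nsec;
    wc->version++;         /* even again: update complete */
}

static void wc_read(const struct wc_page *wc, uint32_t *sec, uint32_t *nsec)
{
    uint32_t v;
    do {
        v = wc->version;
        *sec = wc->sec;
        *nsec = wc->nsec;
    } while ((v & 1) || v != wc->version);   /* retry on concurrent update */
}

int main(void)
{
    struct wc_page wc = { 0, 0, 0 };
    uint32_t s, n;
    wc_write(&wc, 1700000000u, 500u);
    wc_read(&wc, &s, &n);
    printf("%u.%09u (version %u)\n", s, n, wc.version);
    return 0;
}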
23,120
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. Must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk the guest seeing * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context) send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values. */ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; }
DoS +Priv
0
static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data) { struct cpufreq_freqs *freq = data; struct kvm *kvm; struct kvm_vcpu *vcpu; int i, send_ipi = 0; /* * We allow guests to temporarily run on slowing clocks, * provided we notify them after, or to run on accelerating * clocks, provided we notify them before. Thus time never * goes backwards. * * However, we have a problem. We can't atomically update * the frequency of a given CPU from this function; it is * merely a notifier, which can be called from any CPU. * Changing the TSC frequency at arbitrary points in time * requires a recomputation of local variables related to * the TSC for each VCPU. We must flag these local variables * to be updated and be sure the update takes place with the * new frequency before any guests proceed. * * Unfortunately, the combination of hotplug CPU and frequency * change creates an intractable locking scenario; the order * of when these callouts happen is undefined with respect to * CPU hotplug, and they can race with each other. As such, * merely setting per_cpu(cpu_tsc_khz) = X during a hotadd is * undefined; you can actually have a CPU frequency change take * place in between the computation of X and the setting of the * variable. To protect against this problem, all updates of * the per_cpu tsc_khz variable are done in an interrupt * protected IPI, and all callers wishing to update the value * must wait for a synchronous IPI to complete (which is trivial * if the caller is on the CPU already). This establishes the * necessary total order on variable updates. * * Note that because a guest time update may take place * anytime after the setting of the VCPU's request bit, the * correct TSC value must be set before the request. However, * to ensure the update actually makes it to any guest which * starts running in hardware virtualization between the set * and the acquisition of the spinlock, we must also ping the * CPU after setting the request bit. * */ if (val == CPUFREQ_PRECHANGE && freq->old > freq->new) return 0; if (val == CPUFREQ_POSTCHANGE && freq->old < freq->new) return 0; smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) { kvm_for_each_vcpu(i, vcpu, kvm) { if (vcpu->cpu != freq->cpu) continue; kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu); if (vcpu->cpu != smp_processor_id()) send_ipi = 1; } } spin_unlock(&kvm_lock); if (freq->old < freq->new && send_ipi) { /* * We upscale the frequency. Must make sure the guest * doesn't see old kvmclock values while running with * the new frequency, otherwise we risk the guest seeing * time go backwards. * * In case we update the frequency for another cpu * (which might be in guest context) send an interrupt * to kick the cpu out of guest context. Next time * guest context is entered kvmclock will be updated, * so the guest will not see stale values. */ smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1); } return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
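The two early returns at the heart of the notifier above encode one invariant: act before a speed-up and after a slow-down, so guest time never appears to run backwards. A hedged userspace restatement of that filter (should_update and the enum are local names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

enum phase { PRECHANGE, POSTCHANGE };

/* Returns true when the notifier should act, mirroring the two early
 * returns in kvmclock_cpufreq_notifier() above. */
static bool should_update(enum phase p, unsigned int old_khz, unsigned int new_khz)
{
    if (p == PRECHANGE && old_khz > new_khz)
        return false;   /* slowing down: wait for POSTCHANGE */
    if (p == POSTCHANGE && old_khz < new_khz)
        return false;   /* speeding up: already handled at PRECHANGE */
    return true;
}

int main(void)
{
    printf("%d\n", should_update(PRECHANGE, 2000000, 3000000));  /* 1 */
    printf("%d\n", should_update(POSTCHANGE, 2000000, 3000000)); /* 0 */
    return 0;
}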
23,121
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); }
DoS +Priv
0
static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu) { return kvm_arch_has_noncoherent_dma(vcpu->kvm); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,122
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); }
DoS +Priv
0
static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec) { return pvclock_scale_delta(nsec, vcpu->arch.virtual_tsc_mult, vcpu->arch.virtual_tsc_shift); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
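nsec_to_cycles above defers to pvclock_scale_delta(), which treats mult as a 32.32 fixed-point factor: cycles = ((nsec << shift) * mult) >> 32. A hedged userspace model, assuming a compiler with __uint128_t in place of the kernel's hand-rolled wide multiply (scale_delta is a local name):

#include <assert.h>
#include <stdint.h>

static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
    if (shift < 0)
        delta >>= -shift;   /* negative shift scales the input down */
    else
        delta <<= shift;
    return (uint64_t)(((__uint128_t)delta * mul_frac) >> 32);
}

int main(void)
{
    /* A 1 GHz virtual TSC (1 cycle per ns) can be encoded as
     * shift = 1, mul_frac = 2^31: 1000 ns -> 1000 cycles. */
    assert(scale_delta(1000, 0x80000000u, 1) == 1000);
    return 0;
}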
23,123
static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); }
DoS +Priv
0
static void post_kvm_run_save(struct kvm_vcpu *vcpu) { struct kvm_run *kvm_run = vcpu->run; kvm_run->if_flag = (kvm_get_rflags(vcpu) & X86_EFLAGS_IF) != 0; kvm_run->cr8 = kvm_get_cr8(vcpu); kvm_run->apic_base = kvm_get_apic_base(vcpu); if (irqchip_in_kernel(vcpu->kvm)) kvm_run->ready_for_interrupt_injection = 1; else kvm_run->ready_for_interrupt_injection = kvm_arch_interrupt_allowed(vcpu) && !kvm_cpu_has_interrupt(vcpu) && !kvm_event_needs_reinjection(vcpu); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,124
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, void *priv) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; struct timekeeper *tk = priv; update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not * use, TSC clocksource */ if (gtod->clock.vclock_mode != VCLOCK_TSC && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); return 0; }
DoS +Priv
0
static int pvclock_gtod_notify(struct notifier_block *nb, unsigned long unused, void *priv) { struct pvclock_gtod_data *gtod = &pvclock_gtod_data; struct timekeeper *tk = priv; update_pvclock_gtod(tk); /* disable master clock if host does not trust, or does not * use, TSC clocksource */ if (gtod->clock.vclock_mode != VCLOCK_TSC && atomic_read(&kvm_guest_has_master_clock) != 0) queue_work(system_long_wq, &pvclock_gtod_work); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,125
static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); atomic_set(&kvm_guest_has_master_clock, 0); spin_unlock(&kvm_lock); }
DoS +Priv
0
static void pvclock_gtod_update_fn(struct work_struct *work) { struct kvm *kvm; struct kvm_vcpu *vcpu; int i; spin_lock(&kvm_lock); list_for_each_entry(kvm, &vm_list, vm_list) kvm_for_each_vcpu(i, vcpu, kvm) set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests); atomic_set(&kvm_guest_has_master_clock, 0); spin_unlock(&kvm_lock); }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,126
static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then pass the TSC through as stable * to the guest. */ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource & vcpus_matched; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif }
DoS +Priv
0
static void pvclock_update_vm_gtod_copy(struct kvm *kvm) { #ifdef CONFIG_X86_64 struct kvm_arch *ka = &kvm->arch; int vclock_mode; bool host_tsc_clocksource, vcpus_matched; vcpus_matched = (ka->nr_vcpus_matched_tsc + 1 == atomic_read(&kvm->online_vcpus)); /* * If the host uses TSC clock, then pass the TSC through as stable * to the guest. */ host_tsc_clocksource = kvm_get_time_and_clockread( &ka->master_kernel_ns, &ka->master_cycle_now); ka->use_master_clock = host_tsc_clocksource & vcpus_matched; if (ka->use_master_clock) atomic_set(&kvm_guest_has_master_clock, 1); vclock_mode = pvclock_gtod_data.clock.vclock_mode; trace_kvm_update_master_clock(ka->use_master_clock, vclock_mode, vcpus_matched); #endif }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
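The enable condition computed above is worth restating on its own: the master clock is only used when the host clocksource is the TSC and every online vCPU has a matched TSC. A small sketch (use_master_clock is a local name; note the kernel source combines the flags with a bitwise '&', which on two bools is equivalent to '&&' minus short-circuiting):

#include <assert.h>
#include <stdbool.h>

static bool use_master_clock(bool host_tsc_clocksource,
                             unsigned int nr_matched, unsigned int online)
{
    bool vcpus_matched = (nr_matched + 1 == online);
    return host_tsc_clocksource && vcpus_matched;
}

int main(void)
{
    assert(use_master_clock(true, 3, 4));   /* all 4 vCPUs matched */
    assert(!use_master_clock(true, 2, 4));  /* one vCPU out of sync */
    return 0;
}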
23,127
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; }
DoS +Priv
0
static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) { if (vcpu->mmio_read_completed) { trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, vcpu->mmio_fragments[0].gpa, *(u64 *)val); vcpu->mmio_read_completed = 0; return 1; } return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,128
static cycle_t read_tsc(void) { cycle_t ret; u64 last; /* * Empirically, a fence (of type that depends on the CPU) * before rdtsc is enough to ensure that rdtsc is ordered * with respect to loads. The various CPU manuals are unclear * as to whether rdtsc can be reordered with later loads, * but no one has ever seen it happen. */ rdtsc_barrier(); ret = (cycle_t)vget_cycles(); last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; /* * GCC likes to generate cmov here, but this branch is extremely * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function * ever gets inlined it will generate worse code. */ asm volatile (""); return last; }
DoS +Priv
0
static cycle_t read_tsc(void) { cycle_t ret; u64 last; /* * Empirically, a fence (of type that depends on the CPU) * before rdtsc is enough to ensure that rdtsc is ordered * with respect to loads. The various CPU manuals are unclear * as to whether rdtsc can be reordered with later loads, * but no one has ever seen it happen. */ rdtsc_barrier(); ret = (cycle_t)vget_cycles(); last = pvclock_gtod_data.clock.cycle_last; if (likely(ret >= last)) return ret; /* * GCC likes to generate cmov here, but this branch is extremely * predictable (it's just a function of time and the likely is * very likely) and there's a data dependence, so force GCC * to generate a branch instead. I don't barrier() because * we don't actually need a barrier, and if this function * ever gets inlined it will generate worse code. */ asm volatile (""); return last; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
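The value logic of read_tsc() above reduces to a monotonic clamp: never return a cycle count behind the last one published by the timekeeper, so a slightly-behind TSC on one CPU cannot make time appear to move backwards. A minimal sketch of just that clamp (it deliberately ignores the branch-vs-cmov code-generation concern the comment discusses):

#include <assert.h>
#include <stdint.h>

static uint64_t monotonic_tsc(uint64_t raw, uint64_t cycle_last)
{
    /* raw < cycle_last means another CPU already published a later value */
    return raw >= cycle_last ? raw : cycle_last;
}

int main(void)
{
    assert(monotonic_tsc(105, 100) == 105);  /* normal case */
    assert(monotonic_tsc(90, 100) == 100);   /* clamped to cycle_last */
    return 0;
}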
23,129
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, bool write_fault_to_shadow_pgtable, int emulation_type) { gpa_t gpa = cr2; pfn_t pfn; if (emulation_type & EMULTYPE_NO_REEXECUTE) return false; if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only * write access needs to be emulated. */ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); /* * If the mapping is invalid in guest, let the cpu retry * it to generate a fault. */ if (gpa == UNMAPPED_GVA) return true; } /* * Do not retry the unhandleable instruction if it faults on the * readonly host memory, otherwise it will go into an infinite loop: * retry instruction -> write #PF -> emulation fail -> retry * instruction -> ... */ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the instruction failed on the error pfn, it cannot be fixed, * report the error to userspace. */ if (is_error_noslot_pfn(pfn)) return false; kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. */ if (vcpu->arch.mmu.direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; spin_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } /* * If emulation was due to access to a shadowed page table * and it failed, try to unshadow the page and re-enter the * guest to let the CPU execute the instruction. */ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the access faults on its page table, it cannot * be fixed by unprotecting the shadow page, and it should * be reported to userspace. */ return !write_fault_to_shadow_pgtable; }
DoS +Priv
0
static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2, bool write_fault_to_shadow_pgtable, int emulation_type) { gpa_t gpa = cr2; pfn_t pfn; if (emulation_type & EMULTYPE_NO_REEXECUTE) return false; if (!vcpu->arch.mmu.direct_map) { /* * Write permission should be allowed since only * write access needs to be emulated. */ gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL); /* * If the mapping is invalid in guest, let the cpu retry * it to generate a fault. */ if (gpa == UNMAPPED_GVA) return true; } /* * Do not retry the unhandleable instruction if it faults on the * readonly host memory, otherwise it will go into an infinite loop: * retry instruction -> write #PF -> emulation fail -> retry * instruction -> ... */ pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the instruction failed on the error pfn, it cannot be fixed, * report the error to userspace. */ if (is_error_noslot_pfn(pfn)) return false; kvm_release_pfn_clean(pfn); /* The instructions are well-emulated on direct mmu. */ if (vcpu->arch.mmu.direct_map) { unsigned int indirect_shadow_pages; spin_lock(&vcpu->kvm->mmu_lock); indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages; spin_unlock(&vcpu->kvm->mmu_lock); if (indirect_shadow_pages) kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); return true; } /* * If emulation was due to access to a shadowed page table * and it failed, try to unshadow the page and re-enter the * guest to let the CPU execute the instruction. */ kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa)); /* * If the access faults on its page table, it cannot * be fixed by unprotecting the shadow page, and it should * be reported to userspace. */ return !write_fault_to_shadow_pgtable; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,130
static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (!kvm_valid_efer(vcpu, efer)) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; }
DoS +Priv
0
static int set_efer(struct kvm_vcpu *vcpu, u64 efer) { u64 old_efer = vcpu->arch.efer; if (!kvm_valid_efer(vcpu, efer)) return 1; if (is_paging(vcpu) && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) return 1; efer &= ~EFER_LMA; efer |= vcpu->arch.efer & EFER_LMA; kvm_x86_ops->set_efer(vcpu, efer); /* Update reserved bits */ if ((efer ^ old_efer) & EFER_NX) kvm_mmu_reset_context(vcpu); return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
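The masking step in set_efer() above is the interesting part: the guest cannot toggle the hardware-managed LMA bit directly, so the incoming value's LMA bit is discarded and the currently-active one carried over. A self-contained sketch of that bit arithmetic (merge_efer is a local name; the bit positions follow the x86 EFER layout):

#include <assert.h>
#include <stdint.h>

#define EFER_LMA (1ULL << 10)  /* long mode active (hardware-managed) */
#define EFER_NX  (1ULL << 11)  /* no-execute enable */

static uint64_t merge_efer(uint64_t cur_efer, uint64_t new_efer)
{
    new_efer &= ~EFER_LMA;           /* drop the guest-supplied LMA bit */
    new_efer |= cur_efer & EFER_LMA; /* carry over the active one       */
    return new_efer;
}

int main(void)
{
    /* A guest trying to set LMA while it is clear has no effect... */
    assert(merge_efer(0, EFER_LMA | EFER_NX) == EFER_NX);
    /* ...and an active LMA survives a write that omits it. */
    assert(merge_efer(EFER_LMA, EFER_NX) == (EFER_LMA | EFER_NX));
    return 0;
}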
23,131
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; }
DoS +Priv
0
static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data) { switch (msr) { case HV_X64_MSR_APIC_ASSIST_PAGE: { unsigned long addr; if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) { vcpu->arch.hv_vapic = data; break; } addr = gfn_to_hva(vcpu->kvm, data >> HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT); if (kvm_is_error_hva(addr)) return 1; if (__clear_user((void __user *)addr, PAGE_SIZE)) return 1; vcpu->arch.hv_vapic = data; break; } case HV_X64_MSR_EOI: return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data); case HV_X64_MSR_ICR: return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data); case HV_X64_MSR_TPR: return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data); default: vcpu_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x " "data 0x%llx\n", msr, data); return 1; } return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
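The APIC assist page MSR handled above packs an enable flag into bit 0 and the guest frame number into the upper bits. A decoding sketch with the constants inlined per the Hyper-V TLFS layout; treat the macro names here as local stand-ins for the kernel's HV_X64_* definitions:

#include <stdint.h>
#include <stdio.h>

#define ASSIST_PAGE_ENABLE        (1ULL << 0)
#define ASSIST_PAGE_ADDRESS_SHIFT 12

static void decode_assist_msr(uint64_t data)
{
    if (!(data & ASSIST_PAGE_ENABLE)) {
        printf("assist page disabled\n");
        return;
    }
    /* the shift strips the flag bits and yields the guest frame number */
    uint64_t gfn = data >> ASSIST_PAGE_ADDRESS_SHIFT;
    printf("assist page enabled at gfn 0x%llx\n", (unsigned long long)gfn);
}

int main(void)
{
    decode_assist_msr(0x12345000ULL | ASSIST_PAGE_ENABLE); /* gfn 0x12345 */
    return 0;
}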
23,132
static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; }
DoS +Priv
0
static void shared_msr_update(unsigned slot, u32 msr) { u64 value; unsigned int cpu = smp_processor_id(); struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); /* only read, and nobody should modify it at this time, * so don't need lock */ if (slot >= shared_msrs_global.nr) { printk(KERN_ERR "kvm: invalid MSR slot!"); return; } rdmsrl_safe(msr, &value); smsr->values[slot].host = value; smsr->values[slot].curr = value; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,133
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; }
DoS +Priv
0
static int vcpu_mmio_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, gpa_t *gpa, struct x86_exception *exception, bool write) { u32 access = ((kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0) | (write ? PFERR_WRITE_MASK : 0); if (vcpu_match_mmio_gva(vcpu, gva) && !permission_fault(vcpu->arch.walk_mmu, vcpu->arch.access, access)) { *gpa = vcpu->arch.mmio_gfn << PAGE_SHIFT | (gva & (PAGE_SIZE - 1)); trace_vcpu_match_mmio(gva, *gpa, write, false); return 1; } *gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, access, exception); if (*gpa == UNMAPPED_GVA) return -1; /* For APIC access vmexit */ if ((*gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) return 1; if (vcpu_match_mmio_gpa(vcpu, *gpa)) { trace_vcpu_match_mmio(gva, *gpa, write, true); return 1; } return 0; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
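The access mask built at the top of vcpu_mmio_gva_to_gpa() above follows the x86 #PF error-code layout: user-mode (CPL 3) accesses set the USER bit and writes set the WRITE bit, and the result is checked against the cached MMIO access via permission_fault(). A stand-alone restatement (build_access is a local name):

#include <assert.h>
#include <stdint.h>

#define PFERR_WRITE_MASK (1u << 1)
#define PFERR_USER_MASK  (1u << 2)

static uint32_t build_access(int cpl, int write)
{
    return (cpl == 3 ? PFERR_USER_MASK : 0) |
           (write ? PFERR_WRITE_MASK : 0);
}

int main(void)
{
    assert(build_access(3, 1) == (PFERR_USER_MASK | PFERR_WRITE_MASK));
    assert(build_access(0, 0) == 0);  /* kernel-mode read: no bits set */
    return 0;
}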
23,134
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; }
DoS +Priv
0
static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, void *val, int bytes) { struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0]; memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len)); return X86EMUL_CONTINUE; }
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = -EFAULT; if (copy_from_user(&va, argp, sizeof va)) goto out; - r = 0; - kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); break; } case KVM_X86_SETUP_MCE: { @@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static int vapic_enter(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - struct page *page; - - if (!apic || !apic->vapic_addr) - return 0; - - page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - if (is_error_page(page)) - return -EFAULT; - - vcpu->arch.apic->vapic_page = page; - return 0; -} - -static void vapic_exit(struct kvm_vcpu *vcpu) -{ - struct kvm_lapic *apic = vcpu->arch.apic; - int idx; - - if (!apic || !apic->vapic_addr) - return; - - idx = srcu_read_lock(&vcpu->kvm->srcu); - kvm_release_page_dirty(apic->vapic_page); - mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); - srcu_read_unlock(&vcpu->kvm->srcu, idx); -} - static void update_cr8_intercept(struct kvm_vcpu *vcpu) { int max_irr, tpr; @@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) struct kvm *kvm = vcpu->kvm; vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - r = vapic_enter(vcpu); - if (r) { - srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - return r; - } r = 1; while (r > 0) { @@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); - vapic_exit(vcpu); - return r; }
CWE-20
null
null
23,135
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) { vcpu->arch.apic->vapic_addr = vapic_addr; if (vapic_addr) __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); else __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); }
DoS
0
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) { vcpu->arch.apic->vapic_addr = vapic_addr; if (vapic_addr) __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); else __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention); }
@@ -841,7 +841,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || + apic->lapic_timer.period == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
CWE-189
null
null
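The CWE-189 patch attached to this record guards apic_get_tmcct() against lapic_timer.period == 0, which a crafted register state can produce even while TMICT is nonzero; without the check, the modulo by the period divides by zero. A simplified model of the guarded computation (the mod-then-scale step stands in for the kernel's div64_u64 arithmetic; the names are mine, and divide_count is assumed nonzero as it is in hardware):

#include <assert.h>
#include <stdint.h>

static uint32_t tmcct(uint64_t remaining_ns, uint64_t period_ns,
                      uint32_t divide_count)
{
    if (period_ns == 0)   /* the added guard: avoid division by zero */
        return 0;
    return (uint32_t)((remaining_ns % period_ns) / divide_count);
}

int main(void)
{
    assert(tmcct(2500, 1000, 1) == 500);  /* normal wrap-around */
    assert(tmcct(2500, 0, 1) == 0);       /* guarded path */
    return 0;
}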
23,136
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; vapic = kmap_atomic(vcpu->arch.apic->vapic_page); data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); kunmap_atomic(vapic); apic_set_tpr(vcpu->arch.apic, data & 0xff); }
DoS
0
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) { u32 data; void *vapic; if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention)) apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; vapic = kmap_atomic(vcpu->arch.apic->vapic_page); data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); kunmap_atomic(vapic); apic_set_tpr(vcpu->arch.apic, data & 0xff); }
@@ -841,7 +841,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || + apic->lapic_timer.period == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
CWE-189
null
null
23,137
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) { u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; max_irr = apic_find_highest_irr(apic); if (max_irr < 0) max_irr = 0; max_isr = apic_find_highest_isr(apic); if (max_isr < 0) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); vapic = kmap_atomic(vcpu->arch.apic->vapic_page); *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; kunmap_atomic(vapic); }
DoS
0
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) { u32 data, tpr; int max_irr, max_isr; struct kvm_lapic *apic = vcpu->arch.apic; void *vapic; apic_sync_pv_eoi_to_guest(vcpu, apic); if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention)) return; tpr = kvm_apic_get_reg(apic, APIC_TASKPRI) & 0xff; max_irr = apic_find_highest_irr(apic); if (max_irr < 0) max_irr = 0; max_isr = apic_find_highest_isr(apic); if (max_isr < 0) max_isr = 0; data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); vapic = kmap_atomic(vcpu->arch.apic->vapic_page); *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; kunmap_atomic(vapic); }
@@ -841,7 +841,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic) ASSERT(apic != NULL); /* if initial count is 0, current count should also be 0 */ - if (kvm_apic_get_reg(apic, APIC_TMICT) == 0) + if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 || + apic->lapic_timer.period == 0) return 0; remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
CWE-189
null
null
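The 32-bit word kvm_lapic_sync_to_vapic() above publishes to the guest packs three fields: byte 0 carries the TPR, byte 1 the upper nibble of the highest in-service vector, and byte 3 the highest pending IRR vector. A worked example of just the packing expression (pack_vapic is a local name):

#include <assert.h>
#include <stdint.h>

static uint32_t pack_vapic(uint32_t tpr, int max_isr, int max_irr)
{
    return (tpr & 0xff) | ((max_isr & 0xf0) << 8) | ((uint32_t)max_irr << 24);
}

int main(void)
{
    /* TPR 0x20, ISR vector 0x31 (upper nibble 0x30), IRR vector 0x41 */
    assert(pack_vapic(0x20, 0x31, 0x41) == 0x41003020);
    return 0;
}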
23,138
static int arm_init(void) { int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); return rc; }
DoS
0
static int arm_init(void) { int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); return rc; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
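The CWE-399 patch repeated on these ARM records centralizes the "has KVM_ARM_VCPU_INIT run yet" test into one kvm_vcpu_initialized() predicate and applies it to every ioctl that touches init-time state, not just KVM_RUN. A stand-alone sketch of the pattern with a stubbed vcpu struct (the names mirror the patch, but this is illustrative code, not the kernel's):

#include <assert.h>
#include <errno.h>

struct vcpu { int target; /* -1 until KVM_ARM_VCPU_INIT */ };

static int vcpu_initialized(const struct vcpu *v)
{
    return v->target >= 0;
}

static int ioctl_get_one_reg(const struct vcpu *v /* , ... */)
{
    if (!vcpu_initialized(v))
        return -ENOEXEC;  /* reject before touching uninitialized state */
    /* ... perform the register access ... */
    return 0;
}

int main(void)
{
    struct vcpu v = { -1 };
    assert(ioctl_get_one_reg(&v) == -ENOEXEC);
    v.target = 0;
    assert(ioctl_get_one_reg(&v) == 0);
    return 0;
}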
23,139
static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); }
DoS
0
static void check_kvm_target_cpu(void *ret) { *(int *)ret = kvm_target_cpu(); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,140
static void cpu_init_hyp_mode(void *dummy) { unsigned long long boot_pgd_ptr; unsigned long long pgd_ptr; unsigned long hyp_stack_ptr; unsigned long stack_page; unsigned long vector_ptr; /* Switch from the HYP stub to our own HYP init vector */ __hyp_set_vectors(kvm_get_idmap_vector()); boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); }
DoS
0
static void cpu_init_hyp_mode(void *dummy) { unsigned long long boot_pgd_ptr; unsigned long long pgd_ptr; unsigned long hyp_stack_ptr; unsigned long stack_page; unsigned long vector_ptr; /* Switch from the HYP stub to our own HYP init vector */ __hyp_set_vectors(kvm_get_idmap_vector()); boot_pgd_ptr = (unsigned long long)kvm_mmu_get_boot_httbr(); pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); hyp_stack_ptr = stack_page + PAGE_SIZE; vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,141
static void exit_vm_noop(void *info) { }
DoS
0
static void exit_vm_noop(void *info) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,142
void force_vm_exit(const cpumask_t *mask) { smp_call_function_many(mask, exit_vm_noop, NULL, true); }
DoS
0
void force_vm_exit(const cpumask_t *mask) { smp_call_function_many(mask, exit_vm_noop, NULL, true); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
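exit_vm_noop() and force_vm_exit() above form a single idiom: smp_call_function_many() with an empty callback is used purely for its IPI side effect, which forces the targeted CPUs to exit guest mode and re-enter the host. A typical call, sketched under the assumption that any online CPU may be running a vcpu:

/* The callback body is empty; the world switch back to the host,
 * triggered by the IPI itself, is the entire point. */
force_vm_exit(cpu_online_mask);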
23,143
static int hyp_init_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                cpu_init_hyp_mode(NULL);
                break;
        }

        return NOTIFY_OK;
}
DoS
0
static int hyp_init_cpu_notify(struct notifier_block *self,
                               unsigned long action, void *cpu)
{
        switch (action) {
        case CPU_STARTING:
        case CPU_STARTING_FROZEN:
                cpu_init_hyp_mode(NULL);
                break;
        }

        return NOTIFY_OK;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
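hyp_init_cpu_notify() only acts on CPU_STARTING/CPU_STARTING_FROZEN, so a freshly onlined CPU re-runs cpu_init_hyp_mode() before it can ever world-switch into a guest. A sketch of how such a callback was wired up with the CPU notifier API of that kernel era; the notifier_block name here is an assumption:

static struct notifier_block hyp_init_cpu_nb = {
        .notifier_call = hyp_init_cpu_notify,
};

/* during arch init */
register_cpu_notifier(&hyp_init_cpu_nb);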
23,144
static int init_hyp_mode(void)
{
        int cpu;
        int err = 0;

        /*
         * Allocate Hyp PGD and setup Hyp identity mapping
         */
        err = kvm_mmu_init();
        if (err)
                goto out_err;

        /*
         * It is probably enough to obtain the default on one
         * CPU. It's unlikely to be different on the others.
         */
        hyp_default_vectors = __hyp_get_vectors();

        /*
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
                unsigned long stack_page;

                stack_page = __get_free_page(GFP_KERNEL);
                if (!stack_page) {
                        err = -ENOMEM;
                        goto out_free_stack_pages;
                }

                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }

        /*
         * Map the Hyp-code called directly from the host
         */
        err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }

        /*
         * Map the Hyp stack pages
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_free_mappings;
                }
        }

        /*
         * Map the host CPU structures
         */
        kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
        if (!kvm_host_cpu_state) {
                err = -ENOMEM;
                kvm_err("Cannot allocate host CPU state\n");
                goto out_free_mappings;
        }

        for_each_possible_cpu(cpu) {
                kvm_cpu_context_t *cpu_ctxt;

                cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
                err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);

                if (err) {
                        kvm_err("Cannot map host CPU state: %d\n", err);
                        goto out_free_context;
                }
        }

        /*
         * Execute the init code on each CPU.
         */
        on_each_cpu(cpu_init_hyp_mode, NULL, 1);

        /*
         * Init HYP view of VGIC
         */
        err = kvm_vgic_hyp_init();
        if (err)
                goto out_free_context;

#ifdef CONFIG_KVM_ARM_VGIC
        vgic_present = true;
#endif

        /*
         * Init HYP architected timer support
         */
        err = kvm_timer_hyp_init();
        if (err)
                goto out_free_mappings;

#ifndef CONFIG_HOTPLUG_CPU
        free_boot_hyp_pgd();
#endif

        kvm_perf_init();

        kvm_info("Hyp mode initialized successfully\n");

        return 0;
out_free_context:
        free_percpu(kvm_host_cpu_state);
out_free_mappings:
        free_hyp_pgds();
out_free_stack_pages:
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
        kvm_err("error initializing Hyp mode: %d\n", err);
        return err;
}
DoS
0
static int init_hyp_mode(void)
{
        int cpu;
        int err = 0;

        /*
         * Allocate Hyp PGD and setup Hyp identity mapping
         */
        err = kvm_mmu_init();
        if (err)
                goto out_err;

        /*
         * It is probably enough to obtain the default on one
         * CPU. It's unlikely to be different on the others.
         */
        hyp_default_vectors = __hyp_get_vectors();

        /*
         * Allocate stack pages for Hypervisor-mode
         */
        for_each_possible_cpu(cpu) {
                unsigned long stack_page;

                stack_page = __get_free_page(GFP_KERNEL);
                if (!stack_page) {
                        err = -ENOMEM;
                        goto out_free_stack_pages;
                }

                per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
        }

        /*
         * Map the Hyp-code called directly from the host
         */
        err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
        if (err) {
                kvm_err("Cannot map world-switch code\n");
                goto out_free_mappings;
        }

        /*
         * Map the Hyp stack pages
         */
        for_each_possible_cpu(cpu) {
                char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
                err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);

                if (err) {
                        kvm_err("Cannot map hyp stack\n");
                        goto out_free_mappings;
                }
        }

        /*
         * Map the host CPU structures
         */
        kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
        if (!kvm_host_cpu_state) {
                err = -ENOMEM;
                kvm_err("Cannot allocate host CPU state\n");
                goto out_free_mappings;
        }

        for_each_possible_cpu(cpu) {
                kvm_cpu_context_t *cpu_ctxt;

                cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
                err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);

                if (err) {
                        kvm_err("Cannot map host CPU state: %d\n", err);
                        goto out_free_context;
                }
        }

        /*
         * Execute the init code on each CPU.
         */
        on_each_cpu(cpu_init_hyp_mode, NULL, 1);

        /*
         * Init HYP view of VGIC
         */
        err = kvm_vgic_hyp_init();
        if (err)
                goto out_free_context;

#ifdef CONFIG_KVM_ARM_VGIC
        vgic_present = true;
#endif

        /*
         * Init HYP architected timer support
         */
        err = kvm_timer_hyp_init();
        if (err)
                goto out_free_mappings;

#ifndef CONFIG_HOTPLUG_CPU
        free_boot_hyp_pgd();
#endif

        kvm_perf_init();

        kvm_info("Hyp mode initialized successfully\n");

        return 0;
out_free_context:
        free_percpu(kvm_host_cpu_state);
out_free_mappings:
        free_hyp_pgds();
out_free_stack_pages:
        for_each_possible_cpu(cpu)
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
out_err:
        kvm_err("error initializing Hyp mode: %d\n", err);
        return err;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
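init_hyp_mode() above is a textbook case of layered goto unwinding: each failure jumps to a label that releases exactly what the earlier steps acquired, in reverse order, and the labels fall through into one another. The same shape distilled to two steps; every name here is a placeholder:

int setup(void)
{
        int err;

        err = step_a();
        if (err)
                goto out;

        err = step_b();
        if (err)
                goto undo_a;    /* step_b failed: unwind step_a only */

        return 0;

undo_a:
        undo_step_a();
out:
        return err;
}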
23,145
void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { }
DoS
0
void kvm_arch_commit_memory_region(struct kvm *kvm, struct kvm_userspace_memory_region *mem, const struct kvm_memory_slot *old, enum kvm_mr_change change) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,146
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; }
DoS
0
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) { return 0; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,147
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        kvm_free_stage2_pgd(kvm);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}
DoS
0
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        int i;

        kvm_free_stage2_pgd(kvm);

        for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                if (kvm->vcpus[i]) {
                        kvm_arch_vcpu_free(kvm->vcpus[i]);
                        kvm->vcpus[i] = NULL;
                }
        }
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,148
void kvm_arch_exit(void) { kvm_perf_teardown(); }
DoS
0
void kvm_arch_exit(void) { kvm_perf_teardown(); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,149
void kvm_arch_flush_shadow_all(struct kvm *kvm) { }
DoS
0
void kvm_arch_flush_shadow_all(struct kvm *kvm) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,150
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { }
DoS
0
void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,151
void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { }
DoS
0
void kvm_arch_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,152
void kvm_arch_hardware_disable(void *garbage) { }
DoS
0
void kvm_arch_hardware_disable(void *garbage) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,153
int kvm_arch_hardware_enable(void *garbage) { return 0; }
DoS
0
int kvm_arch_hardware_enable(void *garbage) { return 0; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,154
int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { return 0; }
DoS
0
int kvm_arch_prepare_memory_region(struct kvm *kvm, struct kvm_memory_slot *memslot, struct kvm_userspace_memory_region *mem, enum kvm_mr_change change) { return 0; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,155
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err;
        struct kvm_vcpu *vcpu;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        err = create_hyp_mappings(vcpu, vcpu + 1);
        if (err)
                goto vcpu_uninit;

        return vcpu;
vcpu_uninit:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}
DoS
0
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        int err;
        struct kvm_vcpu *vcpu;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu) {
                err = -ENOMEM;
                goto out;
        }

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_vcpu;

        err = create_hyp_mappings(vcpu, vcpu + 1);
        if (err)
                goto vcpu_uninit;

        return vcpu;
vcpu_uninit:
        kvm_vcpu_uninit(vcpu);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,156
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
DoS
0
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,157
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;

        /* Set up VGIC */
        ret = kvm_vgic_vcpu_init(vcpu);
        if (ret)
                return ret;

        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);

        return 0;
}
DoS
0
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        /* Force users to call KVM_ARM_VCPU_INIT */
        vcpu->arch.target = -1;

        /* Set up VGIC */
        ret = kvm_vgic_vcpu_init(vcpu);
        if (ret)
                return ret;

        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);

        return 0;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,158
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; }
DoS
0
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,159
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; }
DoS
0
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, struct kvm_mp_state *mp_state) { return -EINVAL; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,160
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

        /*
         * Check whether this vcpu requires the cache to be flushed on
         * this physical CPU. This is a consequence of doing dcache
         * operations by set/way on this vcpu. We do it here to be in
         * a non-preemptible section.
         */
        if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
                flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

        kvm_arm_set_running_vcpu(vcpu);
}
DoS
0
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = cpu;
        vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);

        /*
         * Check whether this vcpu requires the cache to be flushed on
         * this physical CPU. This is a consequence of doing dcache
         * operations by set/way on this vcpu. We do it here to be in
         * a non-preemptible section.
         */
        if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
                flush_cache_all(); /* We'd really want v7_flush_dcache_all() */

        kvm_arm_set_running_vcpu(vcpu);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,161
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { return 0; }
DoS
0
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) { return 0; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,162
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_arm_set_running_vcpu(NULL); }
DoS
0
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { kvm_arm_set_running_vcpu(NULL); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,163
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v); }
DoS
0
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) { return !!v->arch.irq_lines || kvm_vgic_vcpu_pending_irq(v); }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,164
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { }
DoS
0
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) { }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,165
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                if (vgic_present)
                        return kvm_vgic_create(kvm);
                else
                        return -ENXIO;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;

                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
        default:
                return -EINVAL;
        }
}
DoS
0
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;

        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
                if (vgic_present)
                        return kvm_vgic_create(kvm);
                else
                        return -ENXIO;
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
                struct kvm_arm_device_addr dev_addr;

                if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
                        return -EFAULT;
                return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
        }
        default:
                return -EINVAL;
        }
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,166
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
        BUG_ON(preemptible());
        return __get_cpu_var(kvm_arm_running_vcpu);
}
DoS
0
struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
{
        BUG_ON(preemptible());
        return __get_cpu_var(kvm_arm_running_vcpu);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,167
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return 0; }
DoS
0
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return 0; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,168
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ARM_PSCI:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        default:
                r = kvm_arch_dev_ioctl_check_extension(ext);
                break;
        }

        return r;
}
DoS
0
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_IRQCHIP:
                r = vgic_present;
                break;
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SYNC_MMU:
        case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ARM_PSCI:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
        case KVM_CAP_ARM_SET_DEVICE_ADDR:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
                r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        default:
                r = kvm_arch_dev_ioctl_check_extension(ext);
                break;
        }

        return r;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
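kvm_dev_ioctl_check_extension() is what answers KVM_CHECK_EXTENSION on the /dev/kvm file descriptor: absent capabilities return 0, present ones return a positive value that is sometimes a payload (a page offset, a vcpu count) rather than just 1. A small userspace probe, with error handling elided:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int has_in_kernel_irqchip(void)
{
        int kvm = open("/dev/kvm", O_RDWR);

        /* > 0 when this host can create an in-kernel VGIC */
        return ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP) > 0;
}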
23,169
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void) { return &kvm_arm_running_vcpu; }
DoS
0
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void) { return &kvm_arm_running_vcpu; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,170
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        /*
         * Initialize the VGIC before running a vcpu the first time on
         * this VM.
         */
        if (irqchip_in_kernel(vcpu->kvm) &&
            unlikely(!vgic_initialized(vcpu->kvm))) {
                int ret = kvm_vgic_init(vcpu->kvm);
                if (ret)
                        return ret;
        }

        /*
         * Handle the "start in power-off" case by calling into the
         * PSCI code.
         */
        if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
                *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
                kvm_psci_call(vcpu);
        }

        return 0;
}
DoS
0
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        if (likely(vcpu->arch.has_run_once))
                return 0;

        vcpu->arch.has_run_once = true;

        /*
         * Initialize the VGIC before running a vcpu the first time on
         * this VM.
         */
        if (irqchip_in_kernel(vcpu->kvm) &&
            unlikely(!vgic_initialized(vcpu->kvm))) {
                int ret = kvm_vgic_init(vcpu->kvm);
                if (ret)
                        return ret;
        }

        /*
         * Handle the "start in power-off" case by calling into the
         * PSCI code.
         */
        if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
                *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
                kvm_psci_call(vcpu);
        }

        return 0;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,171
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return -EINVAL; }
DoS
0
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) { return -EINVAL; }
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,172
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status)
{
        u32 irq = irq_level->irq;
        unsigned int irq_type, vcpu_idx, irq_num;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_vcpu *vcpu = NULL;
        bool level = irq_level->level;

        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

        switch (irq_type) {
        case KVM_ARM_IRQ_TYPE_CPU:
                if (irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
                        return -EINVAL;

                return vcpu_interrupt_line(vcpu, irq_num, level);
        case KVM_ARM_IRQ_TYPE_PPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
        case KVM_ARM_IRQ_TYPE_SPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (irq_num < VGIC_NR_PRIVATE_IRQS ||
                    irq_num > KVM_ARM_IRQ_GIC_MAX)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
        }

        return -EINVAL;
}
DoS
0
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
                          bool line_status)
{
        u32 irq = irq_level->irq;
        unsigned int irq_type, vcpu_idx, irq_num;
        int nrcpus = atomic_read(&kvm->online_vcpus);
        struct kvm_vcpu *vcpu = NULL;
        bool level = irq_level->level;

        irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
        vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
        irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

        trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);

        switch (irq_type) {
        case KVM_ARM_IRQ_TYPE_CPU:
                if (irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
                        return -EINVAL;

                return vcpu_interrupt_line(vcpu, irq_num, level);
        case KVM_ARM_IRQ_TYPE_PPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (vcpu_idx >= nrcpus)
                        return -EINVAL;

                vcpu = kvm_get_vcpu(kvm, vcpu_idx);
                if (!vcpu)
                        return -EINVAL;

                if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level);
        case KVM_ARM_IRQ_TYPE_SPI:
                if (!irqchip_in_kernel(kvm))
                        return -ENXIO;

                if (irq_num < VGIC_NR_PRIVATE_IRQS ||
                    irq_num > KVM_ARM_IRQ_GIC_MAX)
                        return -EINVAL;

                return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
        }

        return -EINVAL;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) wait_event_interruptible(*wq, !vcpu->arch.pause); } +static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.target >= 0; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) int ret; sigset_t sigsaved; - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) + if (unlikely(!kvm_vcpu_initialized(vcpu))) return -ENOEXEC; ret = kvm_vcpu_first_run_init(vcpu); @@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, case KVM_SET_ONE_REG: case KVM_GET_ONE_REG: { struct kvm_one_reg reg; + + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg, argp, sizeof(reg))) return -EFAULT; if (ioctl == KVM_SET_ONE_REG) @@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, struct kvm_reg_list reg_list; unsigned n; + if (unlikely(!kvm_vcpu_initialized(vcpu))) + return -ENOEXEC; + if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) return -EFAULT; n = reg_list.n;
CWE-399
null
null
23,173
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                        struct kvm_arm_device_addr *dev_addr)
{
    unsigned long dev_id, type;

    dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
             KVM_ARM_DEVICE_ID_SHIFT;
    type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
           KVM_ARM_DEVICE_TYPE_SHIFT;

    switch (dev_id) {
    case KVM_ARM_DEVICE_VGIC_V2:
        if (!vgic_present)
            return -ENXIO;
        return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
    default:
        return -ENODEV;
    }
}
DoS
0
static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
                                        struct kvm_arm_device_addr *dev_addr)
{
    unsigned long dev_id, type;

    dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
             KVM_ARM_DEVICE_ID_SHIFT;
    type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
           KVM_ARM_DEVICE_TYPE_SHIFT;

    switch (dev_id) {
    case KVM_ARM_DEVICE_VGIC_V2:
        if (!vgic_present)
            return -ENXIO;
        return kvm_vgic_set_addr(kvm, type, dev_addr->addr);
    default:
        return -ENODEV;
    }
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
     wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret;
     sigset_t sigsaved;
 
-    /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-    if (unlikely(vcpu->arch.target < 0))
+    if (unlikely(!kvm_vcpu_initialized(vcpu)))
         return -ENOEXEC;
 
     ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {
         struct kvm_one_reg reg;
+
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg, argp, sizeof(reg)))
             return -EFAULT;
         if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_reg_list reg_list;
         unsigned n;
 
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
             return -EFAULT;
         n = reg_list.n;
CWE-399
null
null
23,174
static bool need_new_vmid_gen(struct kvm *kvm)
{
    return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}
DoS
0
static bool need_new_vmid_gen(struct kvm *kvm)
{
    return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
     wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret;
     sigset_t sigsaved;
 
-    /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-    if (unlikely(vcpu->arch.target < 0))
+    if (unlikely(!kvm_vcpu_initialized(vcpu)))
         return -ENOEXEC;
 
     ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {
         struct kvm_one_reg reg;
+
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg, argp, sizeof(reg)))
             return -EFAULT;
         if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_reg_list reg_list;
         unsigned n;
 
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
             return -EFAULT;
         n = reg_list.n;
CWE-399
null
null
23,175
static void update_vttbr(struct kvm *kvm)
{
    phys_addr_t pgd_phys;
    u64 vmid;

    if (!need_new_vmid_gen(kvm))
        return;

    spin_lock(&kvm_vmid_lock);

    /*
     * We need to re-check the vmid_gen here to ensure that if another vcpu
     * already allocated a valid vmid for this vm, then this vcpu should
     * use the same vmid.
     */
    if (!need_new_vmid_gen(kvm)) {
        spin_unlock(&kvm_vmid_lock);
        return;
    }

    /* First user of a new VMID generation? */
    if (unlikely(kvm_next_vmid == 0)) {
        atomic64_inc(&kvm_vmid_gen);
        kvm_next_vmid = 1;

        /*
         * On SMP we know no other CPUs can use this CPU's or each
         * other's VMID after force_vm_exit returns since the
         * kvm_vmid_lock blocks them from reentry to the guest.
         */
        force_vm_exit(cpu_all_mask);
        /*
         * Now broadcast TLB + ICACHE invalidation over the inner
         * shareable domain to make sure all data structures are
         * clean.
         */
        kvm_call_hyp(__kvm_flush_vm_context);
    }

    kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
    kvm->arch.vmid = kvm_next_vmid;
    kvm_next_vmid++;

    /* update vttbr to be used with the new vmid */
    pgd_phys = virt_to_phys(kvm->arch.pgd);
    vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
    kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
    kvm->arch.vttbr |= vmid;

    spin_unlock(&kvm_vmid_lock);
}
DoS
0
static void update_vttbr(struct kvm *kvm)
{
    phys_addr_t pgd_phys;
    u64 vmid;

    if (!need_new_vmid_gen(kvm))
        return;

    spin_lock(&kvm_vmid_lock);

    /*
     * We need to re-check the vmid_gen here to ensure that if another vcpu
     * already allocated a valid vmid for this vm, then this vcpu should
     * use the same vmid.
     */
    if (!need_new_vmid_gen(kvm)) {
        spin_unlock(&kvm_vmid_lock);
        return;
    }

    /* First user of a new VMID generation? */
    if (unlikely(kvm_next_vmid == 0)) {
        atomic64_inc(&kvm_vmid_gen);
        kvm_next_vmid = 1;

        /*
         * On SMP we know no other CPUs can use this CPU's or each
         * other's VMID after force_vm_exit returns since the
         * kvm_vmid_lock blocks them from reentry to the guest.
         */
        force_vm_exit(cpu_all_mask);
        /*
         * Now broadcast TLB + ICACHE invalidation over the inner
         * shareable domain to make sure all data structures are
         * clean.
         */
        kvm_call_hyp(__kvm_flush_vm_context);
    }

    kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
    kvm->arch.vmid = kvm_next_vmid;
    kvm_next_vmid++;

    /* update vttbr to be used with the new vmid */
    pgd_phys = virt_to_phys(kvm->arch.pgd);
    vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
    kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
    kvm->arch.vttbr |= vmid;

    spin_unlock(&kvm_vmid_lock);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
     wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret;
     sigset_t sigsaved;
 
-    /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-    if (unlikely(vcpu->arch.target < 0))
+    if (unlikely(!kvm_vcpu_initialized(vcpu)))
         return -ENOEXEC;
 
     ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {
         struct kvm_one_reg reg;
+
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg, argp, sizeof(reg)))
             return -EFAULT;
         if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_reg_list reg_list;
         unsigned n;
 
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
             return -EFAULT;
         n = reg_list.n;
CWE-399
null
null
23,176
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
    int bit_index;
    bool set;
    unsigned long *ptr;

    if (number == KVM_ARM_IRQ_CPU_IRQ)
        bit_index = __ffs(HCR_VI);
    else /* KVM_ARM_IRQ_CPU_FIQ */
        bit_index = __ffs(HCR_VF);

    ptr = (unsigned long *)&vcpu->arch.irq_lines;
    if (level)
        set = test_and_set_bit(bit_index, ptr);
    else
        set = test_and_clear_bit(bit_index, ptr);

    /*
     * If we didn't change anything, no need to wake up or kick other CPUs
     */
    if (set == level)
        return 0;

    /*
     * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
     * trigger a world-switch round on the running physical CPU to set the
     * virtual IRQ/FIQ fields in the HCR appropriately.
     */
    kvm_vcpu_kick(vcpu);

    return 0;
}
DoS
0
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
    int bit_index;
    bool set;
    unsigned long *ptr;

    if (number == KVM_ARM_IRQ_CPU_IRQ)
        bit_index = __ffs(HCR_VI);
    else /* KVM_ARM_IRQ_CPU_FIQ */
        bit_index = __ffs(HCR_VF);

    ptr = (unsigned long *)&vcpu->arch.irq_lines;
    if (level)
        set = test_and_set_bit(bit_index, ptr);
    else
        set = test_and_clear_bit(bit_index, ptr);

    /*
     * If we didn't change anything, no need to wake up or kick other CPUs
     */
    if (set == level)
        return 0;

    /*
     * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
     * trigger a world-switch round on the running physical CPU to set the
     * virtual IRQ/FIQ fields in the HCR appropriately.
     */
    kvm_vcpu_kick(vcpu);

    return 0;
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
     wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret;
     sigset_t sigsaved;
 
-    /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-    if (unlikely(vcpu->arch.target < 0))
+    if (unlikely(!kvm_vcpu_initialized(vcpu)))
         return -ENOEXEC;
 
     ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {
         struct kvm_one_reg reg;
+
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg, argp, sizeof(reg)))
             return -EFAULT;
         if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_reg_list reg_list;
         unsigned n;
 
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
             return -EFAULT;
         n = reg_list.n;
CWE-399
null
null
23,177
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
    wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

    wait_event_interruptible(*wq, !vcpu->arch.pause);
}
DoS
0
static void vcpu_pause(struct kvm_vcpu *vcpu)
{
    wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);

    wait_event_interruptible(*wq, !vcpu->arch.pause);
}
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu)
     wait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
+static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+{
+    return vcpu->arch.target >= 0;
+}
+
 /**
  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
  * @vcpu: The VCPU pointer
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
     int ret;
     sigset_t sigsaved;
 
-    /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */
-    if (unlikely(vcpu->arch.target < 0))
+    if (unlikely(!kvm_vcpu_initialized(vcpu)))
         return -ENOEXEC;
 
     ret = kvm_vcpu_first_run_init(vcpu);
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
     case KVM_SET_ONE_REG:
     case KVM_GET_ONE_REG: {
         struct kvm_one_reg reg;
+
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg, argp, sizeof(reg)))
             return -EFAULT;
         if (ioctl == KVM_SET_ONE_REG)
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
         struct kvm_reg_list reg_list;
         unsigned n;
 
+        if (unlikely(!kvm_vcpu_initialized(vcpu)))
+            return -ENOEXEC;
+
         if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
             return -EFAULT;
         n = reg_list.n;
CWE-399
null
null
23,178
static int ssl3_prf( unsigned char *secret, size_t slen, char *label,
                     unsigned char *random, size_t rlen,
                     unsigned char *dstbuf, size_t dlen )
{
    size_t i;
    md5_context md5;
    sha1_context sha1;
    unsigned char padding[16];
    unsigned char sha1sum[20];
    ((void)label);

    /*
     * SSLv3:
     *   block =
     *     MD5( secret + SHA1( 'A'   + secret + random ) ) +
     *     MD5( secret + SHA1( 'BB'  + secret + random ) ) +
     *     MD5( secret + SHA1( 'CCC' + secret + random ) ) +
     *     ...
     */
    for( i = 0; i < dlen / 16; i++ )
    {
        memset( padding, 'A' + i, 1 + i );

        sha1_starts( &sha1 );
        sha1_update( &sha1, padding, 1 + i );
        sha1_update( &sha1, secret, slen );
        sha1_update( &sha1, random, rlen );
        sha1_finish( &sha1, sha1sum );

        md5_starts( &md5 );
        md5_update( &md5, secret, slen );
        md5_update( &md5, sha1sum, 20 );
        md5_finish( &md5, dstbuf + i * 16 );
    }

    memset( &md5, 0, sizeof( md5 ) );
    memset( &sha1, 0, sizeof( sha1 ) );

    memset( padding, 0, sizeof( padding ) );
    memset( sha1sum, 0, sizeof( sha1sum ) );

    return( 0 );
}
DoS
0
static int ssl3_prf( unsigned char *secret, size_t slen, char *label,
                     unsigned char *random, size_t rlen,
                     unsigned char *dstbuf, size_t dlen )
{
    size_t i;
    md5_context md5;
    sha1_context sha1;
    unsigned char padding[16];
    unsigned char sha1sum[20];
    ((void)label);

    /*
     * SSLv3:
     *   block =
     *     MD5( secret + SHA1( 'A'   + secret + random ) ) +
     *     MD5( secret + SHA1( 'BB'  + secret + random ) ) +
     *     MD5( secret + SHA1( 'CCC' + secret + random ) ) +
     *     ...
     */
    for( i = 0; i < dlen / 16; i++ )
    {
        memset( padding, 'A' + i, 1 + i );

        sha1_starts( &sha1 );
        sha1_update( &sha1, padding, 1 + i );
        sha1_update( &sha1, secret, slen );
        sha1_update( &sha1, random, rlen );
        sha1_finish( &sha1, sha1sum );

        md5_starts( &md5 );
        md5_update( &md5, secret, slen );
        md5_update( &md5, sha1sum, 20 );
        md5_finish( &md5, dstbuf + i * 16 );
    }

    memset( &md5, 0, sizeof( md5 ) );
    memset( &sha1, 0, sizeof( sha1 ) );

    memset( padding, 0, sizeof( padding ) );
    memset( sha1sum, 0, sizeof( sha1sum ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,179
static void ssl_calc_finished_ssl(
                ssl_context *ssl, unsigned char *buf, int from )
{
    const char *sender;
    md5_context md5;
    sha1_context sha1;

    unsigned char padbuf[48];
    unsigned char md5sum[16];
    unsigned char sha1sum[20];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished ssl" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    /*
     * SSLv3:
     *   hash =
     *      MD5( master + pad2 +
     *          MD5( handshake + sender + master + pad1 ) )
     *   + SHA1( master + pad2 +
     *         SHA1( handshake + sender + master + pad1 ) )
     */

    SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *)
                   md5.state, sizeof( md5.state ) );

    SSL_DEBUG_BUF( 4, "finished sha1 state", (unsigned char *)
                   sha1.state, sizeof( sha1.state ) );

    sender = ( from == SSL_IS_CLIENT ) ? "CLNT"
                                       : "SRVR";

    memset( padbuf, 0x36, 48 );

    md5_update( &md5, (const unsigned char *) sender, 4 );
    md5_update( &md5, session->master, 48 );
    md5_update( &md5, padbuf, 48 );
    md5_finish( &md5, md5sum );

    sha1_update( &sha1, (const unsigned char *) sender, 4 );
    sha1_update( &sha1, session->master, 48 );
    sha1_update( &sha1, padbuf, 40 );
    sha1_finish( &sha1, sha1sum );

    memset( padbuf, 0x5C, 48 );

    md5_starts( &md5 );
    md5_update( &md5, session->master, 48 );
    md5_update( &md5, padbuf, 48 );
    md5_update( &md5, md5sum, 16 );
    md5_finish( &md5, buf );

    sha1_starts( &sha1 );
    sha1_update( &sha1, session->master, 48 );
    sha1_update( &sha1, padbuf , 40 );
    sha1_update( &sha1, sha1sum, 20 );
    sha1_finish( &sha1, buf + 16 );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, 36 );

    memset( &md5, 0, sizeof( md5_context ) );
    memset( &sha1, 0, sizeof( sha1_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );
    memset( md5sum, 0, sizeof( md5sum ) );
    memset( sha1sum, 0, sizeof( sha1sum ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
DoS
0
static void ssl_calc_finished_ssl(
                ssl_context *ssl, unsigned char *buf, int from )
{
    const char *sender;
    md5_context md5;
    sha1_context sha1;

    unsigned char padbuf[48];
    unsigned char md5sum[16];
    unsigned char sha1sum[20];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished ssl" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    /*
     * SSLv3:
     *   hash =
     *      MD5( master + pad2 +
     *          MD5( handshake + sender + master + pad1 ) )
     *   + SHA1( master + pad2 +
     *         SHA1( handshake + sender + master + pad1 ) )
     */

    SSL_DEBUG_BUF( 4, "finished md5 state", (unsigned char *)
                   md5.state, sizeof( md5.state ) );

    SSL_DEBUG_BUF( 4, "finished sha1 state", (unsigned char *)
                   sha1.state, sizeof( sha1.state ) );

    sender = ( from == SSL_IS_CLIENT ) ? "CLNT"
                                       : "SRVR";

    memset( padbuf, 0x36, 48 );

    md5_update( &md5, (const unsigned char *) sender, 4 );
    md5_update( &md5, session->master, 48 );
    md5_update( &md5, padbuf, 48 );
    md5_finish( &md5, md5sum );

    sha1_update( &sha1, (const unsigned char *) sender, 4 );
    sha1_update( &sha1, session->master, 48 );
    sha1_update( &sha1, padbuf, 40 );
    sha1_finish( &sha1, sha1sum );

    memset( padbuf, 0x5C, 48 );

    md5_starts( &md5 );
    md5_update( &md5, session->master, 48 );
    md5_update( &md5, padbuf, 48 );
    md5_update( &md5, md5sum, 16 );
    md5_finish( &md5, buf );

    sha1_starts( &sha1 );
    sha1_update( &sha1, session->master, 48 );
    sha1_update( &sha1, padbuf , 40 );
    sha1_update( &sha1, sha1sum, 20 );
    sha1_finish( &sha1, buf + 16 );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, 36 );

    memset( &md5, 0, sizeof( md5_context ) );
    memset( &sha1, 0, sizeof( sha1_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );
    memset( md5sum, 0, sizeof( md5sum ) );
    memset( sha1sum, 0, sizeof( sha1sum ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,180
static void ssl_calc_finished_tls_sha256(
                ssl_context *ssl, unsigned char *buf, int from )
{
    int len = 12;
    const char *sender;
    sha2_context sha2;
    unsigned char padbuf[32];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha256" ) );

    memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) );

    /*
     * TLSv1.2:
     *   hash = PRF( master, finished_label,
     *               Hash( handshake ) )[0.11]
     */

    SSL_DEBUG_BUF( 4, "finished sha2 state", (unsigned char *)
                   sha2.state, sizeof( sha2.state ) );

    sender = ( from == SSL_IS_CLIENT )
             ? "client finished"
             : "server finished";

    sha2_finish( &sha2, padbuf );

    ssl->handshake->tls_prf( session->master, 48, (char *) sender,
                             padbuf, 32, buf, len );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, len );

    memset( &sha2, 0, sizeof( sha2_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
DoS
0
static void ssl_calc_finished_tls_sha256(
                ssl_context *ssl, unsigned char *buf, int from )
{
    int len = 12;
    const char *sender;
    sha2_context sha2;
    unsigned char padbuf[32];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha256" ) );

    memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) );

    /*
     * TLSv1.2:
     *   hash = PRF( master, finished_label,
     *               Hash( handshake ) )[0.11]
     */

    SSL_DEBUG_BUF( 4, "finished sha2 state", (unsigned char *)
                   sha2.state, sizeof( sha2.state ) );

    sender = ( from == SSL_IS_CLIENT )
             ? "client finished"
             : "server finished";

    sha2_finish( &sha2, padbuf );

    ssl->handshake->tls_prf( session->master, 48, (char *) sender,
                             padbuf, 32, buf, len );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, len );

    memset( &sha2, 0, sizeof( sha2_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,181
static void ssl_calc_finished_tls_sha384(
                ssl_context *ssl, unsigned char *buf, int from )
{
    int len = 12;
    const char *sender;
    sha4_context sha4;
    unsigned char padbuf[48];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha384" ) );

    memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) );

    /*
     * TLSv1.2:
     *   hash = PRF( master, finished_label,
     *               Hash( handshake ) )[0.11]
     */

    SSL_DEBUG_BUF( 4, "finished sha4 state", (unsigned char *)
                   sha4.state, sizeof( sha4.state ) );

    sender = ( from == SSL_IS_CLIENT )
             ? "client finished"
             : "server finished";

    sha4_finish( &sha4, padbuf );

    ssl->handshake->tls_prf( session->master, 48, (char *) sender,
                             padbuf, 48, buf, len );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, len );

    memset( &sha4, 0, sizeof( sha4_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
DoS
0
static void ssl_calc_finished_tls_sha384(
                ssl_context *ssl, unsigned char *buf, int from )
{
    int len = 12;
    const char *sender;
    sha4_context sha4;
    unsigned char padbuf[48];

    ssl_session *session = ssl->session_negotiate;
    if( !session )
        session = ssl->session;

    SSL_DEBUG_MSG( 2, ( "=> calc finished tls sha384" ) );

    memcpy( &sha4, &ssl->handshake->fin_sha4, sizeof(sha4_context) );

    /*
     * TLSv1.2:
     *   hash = PRF( master, finished_label,
     *               Hash( handshake ) )[0.11]
     */

    SSL_DEBUG_BUF( 4, "finished sha4 state", (unsigned char *)
                   sha4.state, sizeof( sha4.state ) );

    sender = ( from == SSL_IS_CLIENT )
             ? "client finished"
             : "server finished";

    sha4_finish( &sha4, padbuf );

    ssl->handshake->tls_prf( session->master, 48, (char *) sender,
                             padbuf, 48, buf, len );

    SSL_DEBUG_BUF( 3, "calc finished result", buf, len );

    memset( &sha4, 0, sizeof( sha4_context ) );

    memset( padbuf, 0, sizeof( padbuf ) );

    SSL_DEBUG_MSG( 2, ( "<= calc finished" ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,182
void ssl_calc_verify_ssl( ssl_context *ssl, unsigned char hash[36] )
{
    md5_context md5;
    sha1_context sha1;
    unsigned char pad_1[48];
    unsigned char pad_2[48];

    SSL_DEBUG_MSG( 2, ( "=> calc verify ssl" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    memset( pad_1, 0x36, 48 );
    memset( pad_2, 0x5C, 48 );

    md5_update( &md5, ssl->session_negotiate->master, 48 );
    md5_update( &md5, pad_1, 48 );
    md5_finish( &md5, hash );

    md5_starts( &md5 );
    md5_update( &md5, ssl->session_negotiate->master, 48 );
    md5_update( &md5, pad_2, 48 );
    md5_update( &md5, hash, 16 );
    md5_finish( &md5, hash );

    sha1_update( &sha1, ssl->session_negotiate->master, 48 );
    sha1_update( &sha1, pad_1, 40 );
    sha1_finish( &sha1, hash + 16 );

    sha1_starts( &sha1 );
    sha1_update( &sha1, ssl->session_negotiate->master, 48 );
    sha1_update( &sha1, pad_2, 40 );
    sha1_update( &sha1, hash + 16, 20 );
    sha1_finish( &sha1, hash + 16 );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
DoS
0
void ssl_calc_verify_ssl( ssl_context *ssl, unsigned char hash[36] )
{
    md5_context md5;
    sha1_context sha1;
    unsigned char pad_1[48];
    unsigned char pad_2[48];

    SSL_DEBUG_MSG( 2, ( "=> calc verify ssl" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    memset( pad_1, 0x36, 48 );
    memset( pad_2, 0x5C, 48 );

    md5_update( &md5, ssl->session_negotiate->master, 48 );
    md5_update( &md5, pad_1, 48 );
    md5_finish( &md5, hash );

    md5_starts( &md5 );
    md5_update( &md5, ssl->session_negotiate->master, 48 );
    md5_update( &md5, pad_2, 48 );
    md5_update( &md5, hash, 16 );
    md5_finish( &md5, hash );

    sha1_update( &sha1, ssl->session_negotiate->master, 48 );
    sha1_update( &sha1, pad_1, 40 );
    sha1_finish( &sha1, hash + 16 );

    sha1_starts( &sha1 );
    sha1_update( &sha1, ssl->session_negotiate->master, 48 );
    sha1_update( &sha1, pad_2, 40 );
    sha1_update( &sha1, hash + 16, 20 );
    sha1_finish( &sha1, hash + 16 );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,183
void ssl_calc_verify_tls( ssl_context *ssl, unsigned char hash[36] )
{
    md5_context md5;
    sha1_context sha1;

    SSL_DEBUG_MSG( 2, ( "=> calc verify tls" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    md5_finish( &md5, hash );
    sha1_finish( &sha1, hash + 16 );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
DoS
0
void ssl_calc_verify_tls( ssl_context *ssl, unsigned char hash[36] )
{
    md5_context md5;
    sha1_context sha1;

    SSL_DEBUG_MSG( 2, ( "=> calc verify tls" ) );

    memcpy( &md5 , &ssl->handshake->fin_md5 , sizeof(md5_context) );
    memcpy( &sha1, &ssl->handshake->fin_sha1, sizeof(sha1_context) );

    md5_finish( &md5, hash );
    sha1_finish( &sha1, hash + 16 );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 36 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,184
void ssl_calc_verify_tls_sha256( ssl_context *ssl, unsigned char hash[32] )
{
    sha2_context sha2;

    SSL_DEBUG_MSG( 2, ( "=> calc verify sha256" ) );

    memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) );
    sha2_finish( &sha2, hash );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 32 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
DoS
0
void ssl_calc_verify_tls_sha256( ssl_context *ssl, unsigned char hash[32] )
{
    sha2_context sha2;

    SSL_DEBUG_MSG( 2, ( "=> calc verify sha256" ) );

    memcpy( &sha2, &ssl->handshake->fin_sha2, sizeof(sha2_context) );
    sha2_finish( &sha2, hash );

    SSL_DEBUG_BUF( 3, "calculated verify result", hash, 32 );
    SSL_DEBUG_MSG( 2, ( "<= calc verify" ) );

    return;
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,185
int ssl_close_notify( ssl_context *ssl )
{
    int ret;

    SSL_DEBUG_MSG( 2, ( "=> write close notify" ) );

    if( ( ret = ssl_flush_output( ssl ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_flush_output", ret );
        return( ret );
    }

    if( ssl->state == SSL_HANDSHAKE_OVER )
    {
        if( ( ret = ssl_send_alert_message( ssl,
                        SSL_ALERT_LEVEL_WARNING,
                        SSL_ALERT_MSG_CLOSE_NOTIFY ) ) != 0 )
        {
            return( ret );
        }
    }

    SSL_DEBUG_MSG( 2, ( "<= write close notify" ) );

    return( ret );
}
DoS
0
int ssl_close_notify( ssl_context *ssl )
{
    int ret;

    SSL_DEBUG_MSG( 2, ( "=> write close notify" ) );

    if( ( ret = ssl_flush_output( ssl ) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "ssl_flush_output", ret );
        return( ret );
    }

    if( ssl->state == SSL_HANDSHAKE_OVER )
    {
        if( ( ret = ssl_send_alert_message( ssl,
                        SSL_ALERT_LEVEL_WARNING,
                        SSL_ALERT_MSG_CLOSE_NOTIFY ) ) != 0 )
        {
            return( ret );
        }
    }

    SSL_DEBUG_MSG( 2, ( "<= write close notify" ) );

    return( ret );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,186
static int ssl_compress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->out_msg;
    size_t len_pre = ssl->out_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> compress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->out_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "before compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    ssl->transform_out->ctx_deflate.next_in = msg_pre;
    ssl->transform_out->ctx_deflate.avail_in = len_pre;
    ssl->transform_out->ctx_deflate.next_out = msg_post;
    ssl->transform_out->ctx_deflate.avail_out = SSL_BUFFER_LEN;

    ret = deflate( &ssl->transform_out->ctx_deflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform compression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->out_msglen = SSL_BUFFER_LEN -
                      ssl->transform_out->ctx_deflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "after compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    SSL_DEBUG_MSG( 2, ( "<= compress buf" ) );

    return( 0 );
}
DoS
0
static int ssl_compress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->out_msg;
    size_t len_pre = ssl->out_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> compress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->out_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "before compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    ssl->transform_out->ctx_deflate.next_in = msg_pre;
    ssl->transform_out->ctx_deflate.avail_in = len_pre;
    ssl->transform_out->ctx_deflate.next_out = msg_post;
    ssl->transform_out->ctx_deflate.avail_out = SSL_BUFFER_LEN;

    ret = deflate( &ssl->transform_out->ctx_deflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform compression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->out_msglen = SSL_BUFFER_LEN -
                      ssl->transform_out->ctx_deflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after compression: msglen = %d, ",
                   ssl->out_msglen ) );

    SSL_DEBUG_BUF( 4, "after compression: output payload",
                   ssl->out_msg, ssl->out_msglen );

    SSL_DEBUG_MSG( 2, ( "<= compress buf" ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,187
static int ssl_decompress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->in_msg;
    size_t len_pre = ssl->in_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> decompress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->in_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "before decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    ssl->transform_in->ctx_inflate.next_in = msg_pre;
    ssl->transform_in->ctx_inflate.avail_in = len_pre;
    ssl->transform_in->ctx_inflate.next_out = msg_post;
    ssl->transform_in->ctx_inflate.avail_out = SSL_MAX_CONTENT_LEN;

    ret = inflate( &ssl->transform_in->ctx_inflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform decompression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->in_msglen = SSL_MAX_CONTENT_LEN -
                     ssl->transform_in->ctx_inflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "after decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    SSL_DEBUG_MSG( 2, ( "<= decompress buf" ) );

    return( 0 );
}
DoS
0
static int ssl_decompress_buf( ssl_context *ssl )
{
    int ret;
    unsigned char *msg_post = ssl->in_msg;
    size_t len_pre = ssl->in_msglen;
    unsigned char *msg_pre;

    SSL_DEBUG_MSG( 2, ( "=> decompress buf" ) );

    msg_pre = (unsigned char*) malloc( len_pre );
    if( msg_pre == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len_pre ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memcpy( msg_pre, ssl->in_msg, len_pre );

    SSL_DEBUG_MSG( 3, ( "before decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "before decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    ssl->transform_in->ctx_inflate.next_in = msg_pre;
    ssl->transform_in->ctx_inflate.avail_in = len_pre;
    ssl->transform_in->ctx_inflate.next_out = msg_post;
    ssl->transform_in->ctx_inflate.avail_out = SSL_MAX_CONTENT_LEN;

    ret = inflate( &ssl->transform_in->ctx_inflate, Z_SYNC_FLUSH );
    if( ret != Z_OK )
    {
        SSL_DEBUG_MSG( 1, ( "failed to perform decompression (%d)", ret ) );
        return( POLARSSL_ERR_SSL_COMPRESSION_FAILED );
    }

    ssl->in_msglen = SSL_MAX_CONTENT_LEN -
                     ssl->transform_in->ctx_inflate.avail_out;

    free( msg_pre );

    SSL_DEBUG_MSG( 3, ( "after decompression: msglen = %d, ",
                   ssl->in_msglen ) );

    SSL_DEBUG_BUF( 4, "after decompression: input payload",
                   ssl->in_msg, ssl->in_msglen );

    SSL_DEBUG_MSG( 2, ( "<= decompress buf" ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,188
static int ssl_decrypt_buf( ssl_context *ssl )
{
    size_t i, padlen = 0, correct = 1;
    unsigned char tmp[POLARSSL_SSL_MAX_MAC_SIZE];

    SSL_DEBUG_MSG( 2, ( "=> decrypt buf" ) );

    if( ssl->in_msglen < ssl->transform_in->minlen )
    {
        SSL_DEBUG_MSG( 1, ( "in_msglen (%d) < minlen (%d)",
                       ssl->in_msglen, ssl->transform_in->minlen ) );
        return( POLARSSL_ERR_SSL_INVALID_MAC );
    }

    if( ssl->transform_in->ivlen == 0 )
    {
#if defined(POLARSSL_ARC4_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_SHA )
        {
            arc4_crypt( (arc4_context *) ssl->transform_in->ctx_dec,
                        ssl->in_msglen, ssl->in_msg, ssl->in_msg );
        }
        else
#endif
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA256 )
        {
        }
        else
#endif
        return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
    }
    else if( ssl->transform_in->ivlen == 12 )
    {
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        unsigned char add_data[13];
        int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

#if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 )
        {
            dec_msglen = ssl->in_msglen - ( ssl->transform_in->ivlen -
                                            ssl->transform_in->fixed_ivlen );
            dec_msglen -= 16;
            dec_msg = ssl->in_msg + ( ssl->transform_in->ivlen -
                                      ssl->transform_in->fixed_ivlen );
            dec_msg_result = ssl->in_msg;
            ssl->in_msglen = dec_msglen;

            memcpy( add_data, ssl->in_ctr, 8 );
            add_data[8] = ssl->in_msgtype;
            add_data[9] = ssl->major_ver;
            add_data[10] = ssl->minor_ver;
            add_data[11] = ( ssl->in_msglen >> 8 ) & 0xFF;
            add_data[12] = ssl->in_msglen & 0xFF;

            SSL_DEBUG_BUF( 4, "additional data used for AEAD",
                           add_data, 13 );

            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            SSL_DEBUG_BUF( 4, "IV used", ssl->transform_in->iv_dec,
                           ssl->transform_in->ivlen );
            SSL_DEBUG_BUF( 4, "TAG used", dec_msg + dec_msglen, 16 );

            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            ret = gcm_auth_decrypt( (gcm_context *) ssl->transform_in->ctx_dec,
                                    dec_msglen,
                                    ssl->transform_in->iv_dec,
                                    ssl->transform_in->ivlen,
                                    add_data, 13,
                                    dec_msg + dec_msglen, 16,
                                    dec_msg, dec_msg_result );

            if( ret != 0 )
            {
                SSL_DEBUG_MSG( 1, ( "AEAD decrypt failed on validation (ret = -0x%02x)",
                                    -ret ) );
                return( POLARSSL_ERR_SSL_INVALID_MAC );
            }
        }
        else
#endif
        return( ret );
    }
    else
    {
        /*
         * Decrypt and check the padding
         */
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        size_t minlen = 0;

        /*
         * Check immediate ciphertext sanity
         */
        if( ssl->in_msglen % ssl->transform_in->ivlen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) %% ivlen (%d) != 0",
                           ssl->in_msglen, ssl->transform_in->ivlen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
            minlen += ssl->transform_in->ivlen;

        if( ssl->in_msglen < minlen + ssl->transform_in->ivlen ||
            ssl->in_msglen < minlen + ssl->transform_in->maclen + 1 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < max( ivlen(%d), maclen (%d) + 1 ) ( + expl IV )",
                           ssl->in_msglen, ssl->transform_in->ivlen,
                           ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        dec_msglen = ssl->in_msglen;
        dec_msg = ssl->in_msg;
        dec_msg_result = ssl->in_msg;

        /*
         * Initialize for prepended IV for block cipher in TLS v1.1 and up
         */
        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
        {
            dec_msg += ssl->transform_in->ivlen;
            dec_msglen -= ssl->transform_in->ivlen;
            ssl->in_msglen -= ssl->transform_in->ivlen;

            for( i = 0; i < ssl->transform_in->ivlen; i++ )
                ssl->transform_in->iv_dec[i] = ssl->in_msg[i];
        }

        switch( ssl->transform_in->ivlen )
        {
#if defined(POLARSSL_DES_C)
            case 8:
#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
                if( ssl->session_in->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA ||
                    ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA )
                {
                    des_crypt_cbc( (des_context *) ssl->transform_in->ctx_dec,
                                   DES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec, dec_msg,
                                   dec_msg_result );
                }
                else
#endif
                des3_crypt_cbc( (des3_context *) ssl->transform_in->ctx_dec,
                                DES_DECRYPT, dec_msglen,
                                ssl->transform_in->iv_dec, dec_msg,
                                dec_msg_result );
                break;
#endif

            case 16:
#if defined(POLARSSL_AES_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 )
                {
                    aes_crypt_cbc( (aes_context *) ssl->transform_in->ctx_dec,
                                   AES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec, dec_msg,
                                   dec_msg_result );
                    break;
                }
#endif

#if defined(POLARSSL_CAMELLIA_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 )
                {
                    camellia_crypt_cbc( (camellia_context *) ssl->transform_in->ctx_dec,
                                        CAMELLIA_DECRYPT, dec_msglen,
                                        ssl->transform_in->iv_dec, dec_msg,
                                        dec_msg_result );
                    break;
                }
#endif

            default:
                return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }

        padlen = 1 + ssl->in_msg[ssl->in_msglen - 1];

        if( ssl->in_msglen < ssl->transform_in->maclen + padlen )
        {
#if defined(POLARSSL_SSL_DEBUG_ALL)
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < maclen (%d) + padlen (%d)",
                           ssl->in_msglen, ssl->transform_in->maclen, padlen ) );
#endif
            padlen = 0;
            correct = 0;
        }

        if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
        {
            if( padlen > ssl->transform_in->ivlen )
            {
#if defined(POLARSSL_SSL_DEBUG_ALL)
                SSL_DEBUG_MSG( 1, ( "bad padding length: is %d, "
                                    "should be no more than %d",
                               padlen, ssl->transform_in->ivlen ) );
#endif
                correct = 0;
            }
        }
        else
        {
            /*
             * TLSv1+: always check the padding up to the first failure
             * and fake check up to 256 bytes of padding
             */
            size_t pad_count = 0, fake_pad_count = 0;
            size_t padding_idx = ssl->in_msglen - padlen - 1;

            for( i = 1; i <= padlen; i++ )
                pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            for( ; i <= 256; i++ )
                fake_pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            correct &= ( pad_count == padlen ); /* Only 1 on correct padding */
            correct &= ( pad_count + fake_pad_count < 512 ); /* Always 1 */

#if defined(POLARSSL_SSL_DEBUG_ALL)
            if( padlen > 0 && correct == 0)
                SSL_DEBUG_MSG( 1, ( "bad padding byte detected" ) );
#endif
            padlen &= correct * 0x1FF;
        }
    }

    SSL_DEBUG_BUF( 4, "raw buffer after decryption",
                   ssl->in_msg, ssl->in_msglen );

    /*
     * Always compute the MAC (RFC4346, CBCTIME).
     */
    ssl->in_msglen -= ( ssl->transform_in->maclen + padlen );

    ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 );
    ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen );

    memcpy( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen );

    if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        if( ssl->transform_in->maclen == 16 )
            ssl_mac_md5( ssl->transform_in->mac_dec,
                         ssl->in_msg, ssl->in_msglen,
                         ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 20 )
            ssl_mac_sha1( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 32 )
            ssl_mac_sha2( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }
    else
    {
        /*
         * Process MAC and always update for padlen afterwards to make
         * total time independent of padlen
         *
         * extra_run compensates MAC check for padlen
         *
         * Known timing attacks:
         *  - Lucky Thirteen (http://www.isg.rhul.ac.uk/tls/TLStiming.pdf)
         *
         * We use ( ( Lx + 8 ) / 64 ) to handle 'negative Lx' values
         * correctly. (We round down instead of up, so -56 is the correct
         * value for our calculations instead of -55)
         */
        int j, extra_run = 0;
        extra_run = ( 13 + ssl->in_msglen + padlen + 8 ) / 64 -
                    ( 13 + ssl->in_msglen + 8 ) / 64;

        extra_run &= correct * 0xFF;

        if( ssl->transform_in->maclen == 16 )
        {
            md5_context ctx;
            md5_hmac_starts( &ctx, ssl->transform_in->mac_dec, 16 );
            md5_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            md5_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                md5_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 20 )
        {
            sha1_context ctx;
            sha1_hmac_starts( &ctx, ssl->transform_in->mac_dec, 20 );
            sha1_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha1_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha1_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 32 )
        {
            sha2_context ctx;
            sha2_hmac_starts( &ctx, ssl->transform_in->mac_dec, 32, 0 );
            sha2_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha2_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha2_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }

    SSL_DEBUG_BUF( 4, "message mac", tmp, ssl->transform_in->maclen );
    SSL_DEBUG_BUF( 4, "computed mac", ssl->in_msg + ssl->in_msglen,
                   ssl->transform_in->maclen );

    if( memcmp( tmp, ssl->in_msg + ssl->in_msglen,
                     ssl->transform_in->maclen ) != 0 )
    {
#if defined(POLARSSL_SSL_DEBUG_ALL)
        SSL_DEBUG_MSG( 1, ( "message mac does not match" ) );
#endif
        correct = 0;
    }

    /*
     * Finally check the correct flag
     */
    if( correct == 0 )
        return( POLARSSL_ERR_SSL_INVALID_MAC );

    if( ssl->in_msglen == 0 )
    {
        ssl->nb_zero++;

        /*
         * Three or more empty messages may be a DoS attack
         * (excessive CPU consumption).
         */
        if( ssl->nb_zero > 3 )
        {
            SSL_DEBUG_MSG( 1, ( "received four consecutive empty "
                                "messages, possible DoS attack" ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }
    }
    else
        ssl->nb_zero = 0;

    for( i = 8; i > 0; i-- )
        if( ++ssl->in_ctr[i - 1] != 0 )
            break;

    SSL_DEBUG_MSG( 2, ( "<= decrypt buf" ) );

    return( 0 );
}
DoS
0
static int ssl_decrypt_buf( ssl_context *ssl )
{
    size_t i, padlen = 0, correct = 1;
    unsigned char tmp[POLARSSL_SSL_MAX_MAC_SIZE];

    SSL_DEBUG_MSG( 2, ( "=> decrypt buf" ) );

    if( ssl->in_msglen < ssl->transform_in->minlen )
    {
        SSL_DEBUG_MSG( 1, ( "in_msglen (%d) < minlen (%d)",
                       ssl->in_msglen, ssl->transform_in->minlen ) );
        return( POLARSSL_ERR_SSL_INVALID_MAC );
    }

    if( ssl->transform_in->ivlen == 0 )
    {
#if defined(POLARSSL_ARC4_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_RC4_128_SHA )
        {
            arc4_crypt( (arc4_context *) ssl->transform_in->ctx_dec,
                        ssl->in_msglen, ssl->in_msg, ssl->in_msg );
        }
        else
#endif
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_MD5 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_NULL_SHA256 )
        {
        }
        else
#endif
        return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
    }
    else if( ssl->transform_in->ivlen == 12 )
    {
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        unsigned char add_data[13];
        int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

#if defined(POLARSSL_AES_C) && defined(POLARSSL_GCM_C)
        if( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_GCM_SHA384 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 ||
            ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 )
        {
            dec_msglen = ssl->in_msglen - ( ssl->transform_in->ivlen -
                                            ssl->transform_in->fixed_ivlen );
            dec_msglen -= 16;
            dec_msg = ssl->in_msg + ( ssl->transform_in->ivlen -
                                      ssl->transform_in->fixed_ivlen );
            dec_msg_result = ssl->in_msg;
            ssl->in_msglen = dec_msglen;

            memcpy( add_data, ssl->in_ctr, 8 );
            add_data[8] = ssl->in_msgtype;
            add_data[9] = ssl->major_ver;
            add_data[10] = ssl->minor_ver;
            add_data[11] = ( ssl->in_msglen >> 8 ) & 0xFF;
            add_data[12] = ssl->in_msglen & 0xFF;

            SSL_DEBUG_BUF( 4, "additional data used for AEAD",
                           add_data, 13 );

            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            SSL_DEBUG_BUF( 4, "IV used", ssl->transform_in->iv_dec,
                           ssl->transform_in->ivlen );
            SSL_DEBUG_BUF( 4, "TAG used", dec_msg + dec_msglen, 16 );

            memcpy( ssl->transform_in->iv_dec + ssl->transform_in->fixed_ivlen,
                    ssl->in_msg,
                    ssl->transform_in->ivlen - ssl->transform_in->fixed_ivlen );

            ret = gcm_auth_decrypt( (gcm_context *) ssl->transform_in->ctx_dec,
                                    dec_msglen,
                                    ssl->transform_in->iv_dec,
                                    ssl->transform_in->ivlen,
                                    add_data, 13,
                                    dec_msg + dec_msglen, 16,
                                    dec_msg, dec_msg_result );

            if( ret != 0 )
            {
                SSL_DEBUG_MSG( 1, ( "AEAD decrypt failed on validation (ret = -0x%02x)",
                                    -ret ) );
                return( POLARSSL_ERR_SSL_INVALID_MAC );
            }
        }
        else
#endif
        return( ret );
    }
    else
    {
        /*
         * Decrypt and check the padding
         */
        unsigned char *dec_msg;
        unsigned char *dec_msg_result;
        size_t dec_msglen;
        size_t minlen = 0;

        /*
         * Check immediate ciphertext sanity
         */
        if( ssl->in_msglen % ssl->transform_in->ivlen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) %% ivlen (%d) != 0",
                           ssl->in_msglen, ssl->transform_in->ivlen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
            minlen += ssl->transform_in->ivlen;

        if( ssl->in_msglen < minlen + ssl->transform_in->ivlen ||
            ssl->in_msglen < minlen + ssl->transform_in->maclen + 1 )
        {
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < max( ivlen(%d), maclen (%d) + 1 ) ( + expl IV )",
                           ssl->in_msglen, ssl->transform_in->ivlen,
                           ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }

        dec_msglen = ssl->in_msglen;
        dec_msg = ssl->in_msg;
        dec_msg_result = ssl->in_msg;

        /*
         * Initialize for prepended IV for block cipher in TLS v1.1 and up
         */
        if( ssl->minor_ver >= SSL_MINOR_VERSION_2 )
        {
            dec_msg += ssl->transform_in->ivlen;
            dec_msglen -= ssl->transform_in->ivlen;
            ssl->in_msglen -= ssl->transform_in->ivlen;

            for( i = 0; i < ssl->transform_in->ivlen; i++ )
                ssl->transform_in->iv_dec[i] = ssl->in_msg[i];
        }

        switch( ssl->transform_in->ivlen )
        {
#if defined(POLARSSL_DES_C)
            case 8:
#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
                if( ssl->session_in->ciphersuite == TLS_RSA_WITH_DES_CBC_SHA ||
                    ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_DES_CBC_SHA )
                {
                    des_crypt_cbc( (des_context *) ssl->transform_in->ctx_dec,
                                   DES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec, dec_msg,
                                   dec_msg_result );
                }
                else
#endif
                des3_crypt_cbc( (des3_context *) ssl->transform_in->ctx_dec,
                                DES_DECRYPT, dec_msglen,
                                ssl->transform_in->iv_dec, dec_msg,
                                dec_msg_result );
                break;
#endif

            case 16:
#if defined(POLARSSL_AES_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_AES_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 )
                {
                    aes_crypt_cbc( (aes_context *) ssl->transform_in->ctx_dec,
                                   AES_DECRYPT, dec_msglen,
                                   ssl->transform_in->iv_dec, dec_msg,
                                   dec_msg_result );
                    break;
                }
#endif

#if defined(POLARSSL_CAMELLIA_C)
                if ( ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 ||
                     ssl->session_in->ciphersuite == TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 )
                {
                    camellia_crypt_cbc( (camellia_context *) ssl->transform_in->ctx_dec,
                                        CAMELLIA_DECRYPT, dec_msglen,
                                        ssl->transform_in->iv_dec, dec_msg,
                                        dec_msg_result );
                    break;
                }
#endif

            default:
                return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }

        padlen = 1 + ssl->in_msg[ssl->in_msglen - 1];

        if( ssl->in_msglen < ssl->transform_in->maclen + padlen )
        {
#if defined(POLARSSL_SSL_DEBUG_ALL)
            SSL_DEBUG_MSG( 1, ( "msglen (%d) < maclen (%d) + padlen (%d)",
                           ssl->in_msglen, ssl->transform_in->maclen, padlen ) );
#endif
            padlen = 0;
            correct = 0;
        }

        if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
        {
            if( padlen > ssl->transform_in->ivlen )
            {
#if defined(POLARSSL_SSL_DEBUG_ALL)
                SSL_DEBUG_MSG( 1, ( "bad padding length: is %d, "
                                    "should be no more than %d",
                               padlen, ssl->transform_in->ivlen ) );
#endif
                correct = 0;
            }
        }
        else
        {
            /*
             * TLSv1+: always check the padding up to the first failure
             * and fake check up to 256 bytes of padding
             */
            size_t pad_count = 0, fake_pad_count = 0;
            size_t padding_idx = ssl->in_msglen - padlen - 1;

            for( i = 1; i <= padlen; i++ )
                pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            for( ; i <= 256; i++ )
                fake_pad_count += ( ssl->in_msg[padding_idx + i] == padlen - 1 );

            correct &= ( pad_count == padlen ); /* Only 1 on correct padding */
            correct &= ( pad_count + fake_pad_count < 512 ); /* Always 1 */

#if defined(POLARSSL_SSL_DEBUG_ALL)
            if( padlen > 0 && correct == 0)
                SSL_DEBUG_MSG( 1, ( "bad padding byte detected" ) );
#endif
            padlen &= correct * 0x1FF;
        }
    }

    SSL_DEBUG_BUF( 4, "raw buffer after decryption",
                   ssl->in_msg, ssl->in_msglen );

    /*
     * Always compute the MAC (RFC4346, CBCTIME).
     */
    ssl->in_msglen -= ( ssl->transform_in->maclen + padlen );

    ssl->in_hdr[3] = (unsigned char)( ssl->in_msglen >> 8 );
    ssl->in_hdr[4] = (unsigned char)( ssl->in_msglen );

    memcpy( tmp, ssl->in_msg + ssl->in_msglen, ssl->transform_in->maclen );

    if( ssl->minor_ver == SSL_MINOR_VERSION_0 )
    {
        if( ssl->transform_in->maclen == 16 )
            ssl_mac_md5( ssl->transform_in->mac_dec,
                         ssl->in_msg, ssl->in_msglen,
                         ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 20 )
            ssl_mac_sha1( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen == 32 )
            ssl_mac_sha2( ssl->transform_in->mac_dec,
                          ssl->in_msg, ssl->in_msglen,
                          ssl->in_ctr, ssl->in_msgtype );
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }
    else
    {
        /*
         * Process MAC and always update for padlen afterwards to make
         * total time independent of padlen
         *
         * extra_run compensates MAC check for padlen
         *
         * Known timing attacks:
         *  - Lucky Thirteen (http://www.isg.rhul.ac.uk/tls/TLStiming.pdf)
         *
         * We use ( ( Lx + 8 ) / 64 ) to handle 'negative Lx' values
         * correctly. (We round down instead of up, so -56 is the correct
         * value for our calculations instead of -55)
         */
        int j, extra_run = 0;
        extra_run = ( 13 + ssl->in_msglen + padlen + 8 ) / 64 -
                    ( 13 + ssl->in_msglen + 8 ) / 64;

        extra_run &= correct * 0xFF;

        if( ssl->transform_in->maclen == 16 )
        {
            md5_context ctx;
            md5_hmac_starts( &ctx, ssl->transform_in->mac_dec, 16 );
            md5_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            md5_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                md5_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 20 )
        {
            sha1_context ctx;
            sha1_hmac_starts( &ctx, ssl->transform_in->mac_dec, 20 );
            sha1_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha1_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha1_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen == 32 )
        {
            sha2_context ctx;
            sha2_hmac_starts( &ctx, ssl->transform_in->mac_dec, 32, 0 );
            sha2_hmac_update( &ctx, ssl->in_ctr, ssl->in_msglen + 13 );
            sha2_hmac_finish( &ctx, ssl->in_msg + ssl->in_msglen );

            for( j = 0; j < extra_run; j++ )
                sha2_process( &ctx, ssl->in_msg );
        }
        else if( ssl->transform_in->maclen != 0 )
        {
            SSL_DEBUG_MSG( 1, ( "invalid MAC len: %d",
                                ssl->transform_in->maclen ) );
            return( POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE );
        }
    }

    SSL_DEBUG_BUF( 4, "message mac", tmp, ssl->transform_in->maclen );
    SSL_DEBUG_BUF( 4, "computed mac", ssl->in_msg + ssl->in_msglen,
                   ssl->transform_in->maclen );

    if( memcmp( tmp, ssl->in_msg + ssl->in_msglen,
                     ssl->transform_in->maclen ) != 0 )
    {
#if defined(POLARSSL_SSL_DEBUG_ALL)
        SSL_DEBUG_MSG( 1, ( "message mac does not match" ) );
#endif
        correct = 0;
    }

    /*
     * Finally check the correct flag
     */
    if( correct == 0 )
        return( POLARSSL_ERR_SSL_INVALID_MAC );

    if( ssl->in_msglen == 0 )
    {
        ssl->nb_zero++;

        /*
         * Three or more empty messages may be a DoS attack
         * (excessive CPU consumption).
         */
        if( ssl->nb_zero > 3 )
        {
            SSL_DEBUG_MSG( 1, ( "received four consecutive empty "
                                "messages, possible DoS attack" ) );
            return( POLARSSL_ERR_SSL_INVALID_MAC );
        }
    }
    else
        ssl->nb_zero = 0;

    for( i = 8; i > 0; i-- )
        if( ++ssl->in_ctr[i - 1] != 0 )
            break;

    SSL_DEBUG_MSG( 2, ( "<= decrypt buf" ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
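The extra_run computation in the record above is the heart of the Lucky Thirteen countermeasure: the number of compression-function invocations during MAC verification must not depend on the (attacker-influenced) padding length. Below is a minimal standalone sketch of the same block-count formula, assuming a 64-byte compression block, the 13-byte TLS MAC pseudo-header, and the 8 length bytes appended by MD5/SHA-1/SHA-256 padding; the function name is illustrative and not part of the PolarSSL API.

```c
#include <stddef.h>

/*
 * How many extra dummy compression-function runs are needed so that MAC
 * verification always hashes as many blocks as it would have if the
 * padding bytes had been part of the MACed data.  Mirrors the extra_run
 * formula above (a sketch, not PolarSSL code).
 */
static int dummy_compression_runs( size_t msglen, size_t padlen )
{
    /* Blocks hashed if the padding were included in the MACed data... */
    size_t with_pad    = ( 13 + msglen + padlen + 8 ) / 64;
    /* ...versus blocks actually hashed over the true plaintext.       */
    size_t without_pad = ( 13 + msglen + 8 ) / 64;

    return (int)( with_pad - without_pad );
}
```

Feeding that many dummy blocks through the hash context after the real HMAC, as the record's code does with md5_process/sha1_process/sha2_process, equalizes the total work across padding lengths.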
23,189
int ssl_fetch_input( ssl_context *ssl, size_t nb_want )
{
    int ret;
    size_t len;

    SSL_DEBUG_MSG( 2, ( "=> fetch input" ) );

    while( ssl->in_left < nb_want )
    {
        len = nb_want - ssl->in_left;
        ret = ssl->f_recv( ssl->p_recv, ssl->in_hdr + ssl->in_left, len );

        SSL_DEBUG_MSG( 2, ( "in_left: %d, nb_want: %d",
                            ssl->in_left, nb_want ) );
        SSL_DEBUG_RET( 2, "ssl->f_recv", ret );

        if( ret == 0 )
            return( POLARSSL_ERR_SSL_CONN_EOF );

        if( ret < 0 )
            return( ret );

        ssl->in_left += ret;
    }

    SSL_DEBUG_MSG( 2, ( "<= fetch input" ) );

    return( 0 );
}
DoS
0
int ssl_fetch_input( ssl_context *ssl, size_t nb_want )
{
    int ret;
    size_t len;

    SSL_DEBUG_MSG( 2, ( "=> fetch input" ) );

    while( ssl->in_left < nb_want )
    {
        len = nb_want - ssl->in_left;
        ret = ssl->f_recv( ssl->p_recv, ssl->in_hdr + ssl->in_left, len );

        SSL_DEBUG_MSG( 2, ( "in_left: %d, nb_want: %d",
                            ssl->in_left, nb_want ) );
        SSL_DEBUG_RET( 2, "ssl->f_recv", ret );

        if( ret == 0 )
            return( POLARSSL_ERR_SSL_CONN_EOF );

        if( ret < 0 )
            return( ret );

        ssl->in_left += ret;
    }

    SSL_DEBUG_MSG( 2, ( "<= fetch input" ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
        {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
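ssl_fetch_input embodies the standard "loop until exactly N bytes are buffered" pattern: short reads from the callback are accumulated in in_left, zero means EOF, and negative values propagate unchanged. The same pattern over a plain POSIX file descriptor looks like the sketch below; read_exact is a hypothetical helper, not part of PolarSSL, and EINTR handling is added because raw read() can be interrupted by signals.

```c
#include <errno.h>
#include <unistd.h>

/* Read exactly nb_want bytes into buf, or fail (a sketch). */
static int read_exact( int fd, unsigned char *buf, size_t nb_want )
{
    size_t got = 0;

    while( got < nb_want )
    {
        ssize_t ret = read( fd, buf + got, nb_want - got );

        if( ret == 0 )              /* peer closed: mirrors CONN_EOF   */
            return( -1 );
        if( ret < 0 )
        {
            if( errno == EINTR )
                continue;           /* retry after signal interruption */
            return( -1 );           /* hard error: mirrors ret < 0     */
        }

        got += (size_t) ret;
    }

    return( 0 );
}
```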
23,190
int ssl_flush_output( ssl_context *ssl )
{
    int ret;
    unsigned char *buf;

    SSL_DEBUG_MSG( 2, ( "=> flush output" ) );

    while( ssl->out_left > 0 )
    {
        SSL_DEBUG_MSG( 2, ( "message length: %d, out_left: %d",
                            5 + ssl->out_msglen, ssl->out_left ) );

        if( ssl->out_msglen < ssl->out_left )
        {
            size_t header_left = ssl->out_left - ssl->out_msglen;

            buf = ssl->out_hdr + 5 - header_left;
            ret = ssl->f_send( ssl->p_send, buf, header_left );

            SSL_DEBUG_RET( 2, "ssl->f_send (header)", ret );

            if( ret <= 0 )
                return( ret );

            ssl->out_left -= ret;
        }

        buf = ssl->out_msg + ssl->out_msglen - ssl->out_left;
        ret = ssl->f_send( ssl->p_send, buf, ssl->out_left );

        SSL_DEBUG_RET( 2, "ssl->f_send", ret );

        if( ret <= 0 )
            return( ret );

        ssl->out_left -= ret;
    }

    SSL_DEBUG_MSG( 2, ( "<= flush output" ) );

    return( 0 );
}
DoS
0
int ssl_flush_output( ssl_context *ssl )
{
    int ret;
    unsigned char *buf;

    SSL_DEBUG_MSG( 2, ( "=> flush output" ) );

    while( ssl->out_left > 0 )
    {
        SSL_DEBUG_MSG( 2, ( "message length: %d, out_left: %d",
                            5 + ssl->out_msglen, ssl->out_left ) );

        if( ssl->out_msglen < ssl->out_left )
        {
            size_t header_left = ssl->out_left - ssl->out_msglen;

            buf = ssl->out_hdr + 5 - header_left;
            ret = ssl->f_send( ssl->p_send, buf, header_left );

            SSL_DEBUG_RET( 2, "ssl->f_send (header)", ret );

            if( ret <= 0 )
                return( ret );

            ssl->out_left -= ret;
        }

        buf = ssl->out_msg + ssl->out_msglen - ssl->out_left;
        ret = ssl->f_send( ssl->p_send, buf, ssl->out_left );

        SSL_DEBUG_RET( 2, "ssl->f_send", ret );

        if( ret <= 0 )
            return( ret );

        ssl->out_left -= ret;
    }

    SSL_DEBUG_MSG( 2, ( "<= flush output" ) );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
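The subtle part of ssl_flush_output is that out_left counts the unsent remainder of the 5-byte record header and the payload combined, so a partial send can leave the stream mid-header and the next call must resume at the right offset in the right buffer. The sketch below isolates that resume arithmetic; send_fn stands in for ssl->f_send and nothing here is PolarSSL API.

```c
#include <stddef.h>

/*
 * Send a record split into a 5-byte header plus payload, resuming
 * correctly after partial sends.  "left" counts unsent bytes of
 * header + payload combined, exactly as out_left does above.
 */
static int send_record( int (*send_fn)( void *, const unsigned char *, size_t ),
                        void *ctx,
                        const unsigned char hdr[5],
                        const unsigned char *payload, size_t paylen )
{
    size_t left = 5 + paylen;

    while( left > 0 )
    {
        const unsigned char *p;
        size_t chunk;
        int ret;

        if( left > paylen )     /* part of the header is still unsent */
        {
            chunk = left - paylen;
            p     = hdr + ( 5 - chunk );
        }
        else                    /* header done; finish the payload    */
        {
            chunk = left;
            p     = payload + ( paylen - left );
        }

        ret = send_fn( ctx, p, chunk );
        if( ret <= 0 )
            return( ret );      /* propagate, as ssl_flush_output does */

        left -= (size_t) ret;
    }

    return( 0 );
}
```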
23,191
void ssl_free( ssl_context *ssl )
{
    SSL_DEBUG_MSG( 2, ( "=> free" ) );

    free( ssl->ciphersuites );

    if( ssl->out_ctr != NULL )
    {
        memset( ssl->out_ctr, 0, SSL_BUFFER_LEN );
        free( ssl->out_ctr );
    }

    if( ssl->in_ctr != NULL )
    {
        memset( ssl->in_ctr, 0, SSL_BUFFER_LEN );
        free( ssl->in_ctr );
    }

#if defined(POLARSSL_DHM_C)
    mpi_free( &ssl->dhm_P );
    mpi_free( &ssl->dhm_G );
#endif

    if( ssl->transform )
    {
        ssl_transform_free( ssl->transform );
        free( ssl->transform );
    }

    if( ssl->handshake )
    {
        ssl_handshake_free( ssl->handshake );
        ssl_transform_free( ssl->transform_negotiate );
        ssl_session_free( ssl->session_negotiate );

        free( ssl->handshake );
        free( ssl->transform_negotiate );
        free( ssl->session_negotiate );
    }

    if( ssl->session )
    {
        ssl_session_free( ssl->session );
        free( ssl->session );
    }

    if( ssl->hostname != NULL )
    {
        memset( ssl->hostname, 0, ssl->hostname_len );
        free( ssl->hostname );
        ssl->hostname_len = 0;
    }

#if defined(POLARSSL_SSL_HW_RECORD_ACCEL)
    if( ssl_hw_record_finish != NULL )
    {
        SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_finish()" ) );
        ssl_hw_record_finish( ssl );
    }
#endif

    SSL_DEBUG_MSG( 2, ( "<= free" ) );

    /* Actually clear after last debug message */
    memset( ssl, 0, sizeof( ssl_context ) );
}
DoS
0
void ssl_free( ssl_context *ssl )
{
    SSL_DEBUG_MSG( 2, ( "=> free" ) );

    free( ssl->ciphersuites );

    if( ssl->out_ctr != NULL )
    {
        memset( ssl->out_ctr, 0, SSL_BUFFER_LEN );
        free( ssl->out_ctr );
    }

    if( ssl->in_ctr != NULL )
    {
        memset( ssl->in_ctr, 0, SSL_BUFFER_LEN );
        free( ssl->in_ctr );
    }

#if defined(POLARSSL_DHM_C)
    mpi_free( &ssl->dhm_P );
    mpi_free( &ssl->dhm_G );
#endif

    if( ssl->transform )
    {
        ssl_transform_free( ssl->transform );
        free( ssl->transform );
    }

    if( ssl->handshake )
    {
        ssl_handshake_free( ssl->handshake );
        ssl_transform_free( ssl->transform_negotiate );
        ssl_session_free( ssl->session_negotiate );

        free( ssl->handshake );
        free( ssl->transform_negotiate );
        free( ssl->session_negotiate );
    }

    if( ssl->session )
    {
        ssl_session_free( ssl->session );
        free( ssl->session );
    }

    if( ssl->hostname != NULL )
    {
        memset( ssl->hostname, 0, ssl->hostname_len );
        free( ssl->hostname );
        ssl->hostname_len = 0;
    }

#if defined(POLARSSL_SSL_HW_RECORD_ACCEL)
    if( ssl_hw_record_finish != NULL )
    {
        SSL_DEBUG_MSG( 2, ( "going for ssl_hw_record_finish()" ) );
        ssl_hw_record_finish( ssl );
    }
#endif

    SSL_DEBUG_MSG( 2, ( "<= free" ) );

    /* Actually clear after last debug message */
    memset( ssl, 0, sizeof( ssl_context ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
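ssl_free zeroizes buffers before releasing them so key material does not linger in freed heap memory. One caveat worth noting: a plain memset on memory that is about to be freed (or on a local that is about to go out of scope) can legally be optimized away by the compiler. A common workaround, shown as a sketch below rather than as PolarSSL code, forces the stores through a volatile pointer so they cannot be elided.

```c
#include <stddef.h>

/* Zeroization the optimizer cannot remove (a sketch). */
static void secure_zero( void *v, size_t n )
{
    volatile unsigned char *p = (volatile unsigned char *) v;

    while( n-- )
        *p++ = 0;   /* each store is an observable volatile access */
}
```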
23,192
size_t ssl_get_bytes_avail( const ssl_context *ssl )
{
    return( ssl->in_offt == NULL ? 0 : ssl->in_msglen );
}
DoS
0
size_t ssl_get_bytes_avail( const ssl_context *ssl )
{
    return( ssl->in_offt == NULL ? 0 : ssl->in_msglen );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,193
const char *ssl_get_ciphersuite_name( const int ciphersuite_id )
{
    switch( ciphersuite_id )
    {
#if defined(POLARSSL_ARC4_C)
        case TLS_RSA_WITH_RC4_128_MD5:
            return( "TLS-RSA-WITH-RC4-128-MD5" );
        case TLS_RSA_WITH_RC4_128_SHA:
            return( "TLS-RSA-WITH-RC4-128-SHA" );
#endif

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_3DES_EDE_CBC_SHA:
            return( "TLS-RSA-WITH-3DES-EDE-CBC-SHA" );
        case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA" );
#endif

#if defined(POLARSSL_AES_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA:
            return( "TLS-RSA-WITH-AES-128-CBC-SHA" );
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA" );
        case TLS_RSA_WITH_AES_256_CBC_SHA:
            return( "TLS-RSA-WITH-AES-256-CBC-SHA" );
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA" );

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA256:
            return( "TLS-RSA-WITH-AES-128-CBC-SHA256" );
        case TLS_RSA_WITH_AES_256_CBC_SHA256:
            return( "TLS-RSA-WITH-AES-256-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256" );
#endif

#if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_AES_128_GCM_SHA256:
            return( "TLS-RSA-WITH-AES-128-GCM-SHA256" );
        case TLS_RSA_WITH_AES_256_GCM_SHA384:
            return( "TLS-RSA-WITH-AES-256-GCM-SHA384" );
#endif

#if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C)
        case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256" );
        case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
            return( "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384" );
#endif
#endif /* POLARSSL_AES_C */

#if defined(POLARSSL_CAMELLIA_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA:
            return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA" );
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA" );
        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA:
            return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA" );
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA" );

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256:
            return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256" );
        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256:
            return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256" );
#endif
#endif

#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        case TLS_RSA_WITH_NULL_MD5:
            return( "TLS-RSA-WITH-NULL-MD5" );
        case TLS_RSA_WITH_NULL_SHA:
            return( "TLS-RSA-WITH-NULL-SHA" );
        case TLS_RSA_WITH_NULL_SHA256:
            return( "TLS-RSA-WITH-NULL-SHA256" );
#endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_DES_CBC_SHA:
            return( "TLS-RSA-WITH-DES-CBC-SHA" );
        case TLS_DHE_RSA_WITH_DES_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-DES-CBC-SHA" );
#endif
#endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */

        default:
            break;
    }

    return( "unknown" );
}
DoS
0
const char *ssl_get_ciphersuite_name( const int ciphersuite_id )
{
    switch( ciphersuite_id )
    {
#if defined(POLARSSL_ARC4_C)
        case TLS_RSA_WITH_RC4_128_MD5:
            return( "TLS-RSA-WITH-RC4-128-MD5" );
        case TLS_RSA_WITH_RC4_128_SHA:
            return( "TLS-RSA-WITH-RC4-128-SHA" );
#endif

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_3DES_EDE_CBC_SHA:
            return( "TLS-RSA-WITH-3DES-EDE-CBC-SHA" );
        case TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-3DES-EDE-CBC-SHA" );
#endif

#if defined(POLARSSL_AES_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA:
            return( "TLS-RSA-WITH-AES-128-CBC-SHA" );
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA" );
        case TLS_RSA_WITH_AES_256_CBC_SHA:
            return( "TLS-RSA-WITH-AES-256-CBC-SHA" );
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA" );

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_AES_128_CBC_SHA256:
            return( "TLS-RSA-WITH-AES-128-CBC-SHA256" );
        case TLS_RSA_WITH_AES_256_CBC_SHA256:
            return( "TLS-RSA-WITH-AES-256-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_AES_128_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-128-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_AES_256_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-256-CBC-SHA256" );
#endif

#if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_AES_128_GCM_SHA256:
            return( "TLS-RSA-WITH-AES-128-GCM-SHA256" );
        case TLS_RSA_WITH_AES_256_GCM_SHA384:
            return( "TLS-RSA-WITH-AES-256-GCM-SHA384" );
#endif

#if defined(POLARSSL_GCM_C) && defined(POLARSSL_SHA4_C)
        case TLS_DHE_RSA_WITH_AES_128_GCM_SHA256:
            return( "TLS-DHE-RSA-WITH-AES-128-GCM-SHA256" );
        case TLS_DHE_RSA_WITH_AES_256_GCM_SHA384:
            return( "TLS-DHE-RSA-WITH-AES-256-GCM-SHA384" );
#endif
#endif /* POLARSSL_AES_C */

#if defined(POLARSSL_CAMELLIA_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA:
            return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA" );
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA" );
        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA:
            return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA" );
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA" );

#if defined(POLARSSL_SHA2_C)
        case TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256:
            return( "TLS-RSA-WITH-CAMELLIA-128-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-128-CBC-SHA256" );
        case TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256:
            return( "TLS-RSA-WITH-CAMELLIA-256-CBC-SHA256" );
        case TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256:
            return( "TLS-DHE-RSA-WITH-CAMELLIA-256-CBC-SHA256" );
#endif
#endif

#if defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES)
#if defined(POLARSSL_CIPHER_NULL_CIPHER)
        case TLS_RSA_WITH_NULL_MD5:
            return( "TLS-RSA-WITH-NULL-MD5" );
        case TLS_RSA_WITH_NULL_SHA:
            return( "TLS-RSA-WITH-NULL-SHA" );
        case TLS_RSA_WITH_NULL_SHA256:
            return( "TLS-RSA-WITH-NULL-SHA256" );
#endif /* defined(POLARSSL_CIPHER_NULL_CIPHER) */

#if defined(POLARSSL_DES_C)
        case TLS_RSA_WITH_DES_CBC_SHA:
            return( "TLS-RSA-WITH-DES-CBC-SHA" );
        case TLS_DHE_RSA_WITH_DES_CBC_SHA:
            return( "TLS-DHE-RSA-WITH-DES-CBC-SHA" );
#endif
#endif /* defined(POLARSSL_ENABLE_WEAK_CIPHERSUITES) */

        default:
            break;
    }

    return( "unknown" );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,194
int ssl_get_verify_result( const ssl_context *ssl )
{
    return( ssl->verify_result );
}
DoS
0
int ssl_get_verify_result( const ssl_context *ssl )
{
    return( ssl->verify_result );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,195
const char *ssl_get_version( const ssl_context *ssl )
{
    switch( ssl->minor_ver )
    {
        case SSL_MINOR_VERSION_0:
            return( "SSLv3.0" );
        case SSL_MINOR_VERSION_1:
            return( "TLSv1.0" );
        case SSL_MINOR_VERSION_2:
            return( "TLSv1.1" );
        case SSL_MINOR_VERSION_3:
            return( "TLSv1.2" );
        default:
            break;
    }

    return( "unknown" );
}
DoS
0
const char *ssl_get_version( const ssl_context *ssl )
{
    switch( ssl->minor_ver )
    {
        case SSL_MINOR_VERSION_0:
            return( "SSLv3.0" );
        case SSL_MINOR_VERSION_1:
            return( "TLSv1.0" );
        case SSL_MINOR_VERSION_2:
            return( "TLSv1.1" );
        case SSL_MINOR_VERSION_3:
            return( "TLSv1.2" );
        default:
            break;
    }

    return( "unknown" );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,196
void ssl_handshake_free( ssl_handshake_params *handshake )
{
#if defined(POLARSSL_DHM_C)
    dhm_free( &handshake->dhm_ctx );
#endif

    memset( handshake, 0, sizeof( ssl_handshake_params ) );
}
DoS
0
void ssl_handshake_free( ssl_handshake_params *handshake )
{
#if defined(POLARSSL_DHM_C)
    dhm_free( &handshake->dhm_ctx );
#endif

    memset( handshake, 0, sizeof( ssl_handshake_params ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
23,197
int ssl_handshake_step( ssl_context *ssl )
{
    int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

#if defined(POLARSSL_SSL_CLI_C)
    if( ssl->endpoint == SSL_IS_CLIENT )
        ret = ssl_handshake_client_step( ssl );
#endif

#if defined(POLARSSL_SSL_SRV_C)
    if( ssl->endpoint == SSL_IS_SERVER )
        ret = ssl_handshake_server_step( ssl );
#endif

    return( ret );
}
DoS
0
int ssl_handshake_step( ssl_context *ssl )
{
    int ret = POLARSSL_ERR_SSL_FEATURE_UNAVAILABLE;

#if defined(POLARSSL_SSL_CLI_C)
    if( ssl->endpoint == SSL_IS_CLIENT )
        ret = ssl_handshake_client_step( ssl );
#endif

#if defined(POLARSSL_SSL_SRV_C)
    if( ssl->endpoint == SSL_IS_SERVER )
        ret = ssl_handshake_server_step( ssl );
#endif

    return( ret );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
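ssl_handshake_step advances the handshake state machine by one message at a time, dispatching to the client or server step function by endpoint. Callers typically drive it in a loop until the handshake completes; the sketch below mirrors how PolarSSL's ssl_handshake() is built on top of this function, but is written here as an illustrative driver rather than a quote of the library source.

```c
/* Drive the state machine to completion (a sketch). */
int handshake_loop( ssl_context *ssl )
{
    int ret = 0;

    while( ssl->state != SSL_HANDSHAKE_OVER )
    {
        ret = ssl_handshake_step( ssl );

        if( ret != 0 )
            break;              /* propagate errors to the caller */
    }

    return( ret );
}
```

With non-blocking transports, the caller would additionally treat a "want read/want write" style return as a signal to wait for socket readiness and call the loop again.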
23,198
void ssl_handshake_wrapup( ssl_context *ssl )
{
    SSL_DEBUG_MSG( 3, ( "=> handshake wrapup" ) );

    /*
     * Free our handshake params
     */
    ssl_handshake_free( ssl->handshake );
    free( ssl->handshake );
    ssl->handshake = NULL;

    /*
     * Switch in our now active transform context
     */
    if( ssl->transform )
    {
        ssl_transform_free( ssl->transform );
        free( ssl->transform );
    }
    ssl->transform = ssl->transform_negotiate;
    ssl->transform_negotiate = NULL;

    if( ssl->session )
    {
        ssl_session_free( ssl->session );
        free( ssl->session );
    }
    ssl->session = ssl->session_negotiate;
    ssl->session_negotiate = NULL;

    /*
     * Add cache entry
     */
    if( ssl->f_set_cache != NULL )
        if( ssl->f_set_cache( ssl->p_set_cache, ssl->session ) != 0 )
            SSL_DEBUG_MSG( 1, ( "cache did not store session" ) );

    ssl->state++;

    SSL_DEBUG_MSG( 3, ( "<= handshake wrapup" ) );
}
DoS
0
void ssl_handshake_wrapup( ssl_context *ssl )
{
    SSL_DEBUG_MSG( 3, ( "=> handshake wrapup" ) );

    /*
     * Free our handshake params
     */
    ssl_handshake_free( ssl->handshake );
    free( ssl->handshake );
    ssl->handshake = NULL;

    /*
     * Switch in our now active transform context
     */
    if( ssl->transform )
    {
        ssl_transform_free( ssl->transform );
        free( ssl->transform );
    }
    ssl->transform = ssl->transform_negotiate;
    ssl->transform_negotiate = NULL;

    if( ssl->session )
    {
        ssl_session_free( ssl->session );
        free( ssl->session );
    }
    ssl->session = ssl->session_negotiate;
    ssl->session_negotiate = NULL;

    /*
     * Add cache entry
     */
    if( ssl->f_set_cache != NULL )
        if( ssl->f_set_cache( ssl->p_set_cache, ssl->session ) != 0 )
            SSL_DEBUG_MSG( 1, ( "cache did not store session" ) );

    ssl->state++;

    SSL_DEBUG_MSG( 3, ( "<= handshake wrapup" ) );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
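The "free the old object, adopt the negotiated one, null out the source pointer" moves in ssl_handshake_wrapup are a C ownership-transfer idiom: clearing the source pointer guarantees that a later cleanup pass (such as ssl_free) cannot free the same object twice. A generic sketch of the pattern, with a hypothetical helper name:

```c
#include <stdlib.h>

/* Transfer ownership of a session object (illustrative helper). */
static void transfer_session( ssl_session **active, ssl_session **negotiated )
{
    if( *active != NULL )
    {
        ssl_session_free( *active );   /* release the old object   */
        free( *active );
    }

    *active     = *negotiated;         /* adopt the negotiated one */
    *negotiated = NULL;                /* prevent a double free    */
}
```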
23,199
int ssl_init( ssl_context *ssl )
{
    int ret;
    int len = SSL_BUFFER_LEN;

    memset( ssl, 0, sizeof( ssl_context ) );

    /*
     * Sane defaults
     */
    ssl->rsa_decrypt = ssl_rsa_decrypt;
    ssl->rsa_sign = ssl_rsa_sign;
    ssl->rsa_key_len = ssl_rsa_key_len;

    ssl->min_major_ver = SSL_MAJOR_VERSION_3;
    ssl->min_minor_ver = SSL_MINOR_VERSION_0;

    ssl->ciphersuites = malloc( sizeof(int *) * 4 );
    ssl_set_ciphersuites( ssl, ssl_default_ciphersuites );

#if defined(POLARSSL_DHM_C)
    if( ( ret = mpi_read_string( &ssl->dhm_P, 16,
                                 POLARSSL_DHM_RFC5114_MODP_1024_P) ) != 0 ||
        ( ret = mpi_read_string( &ssl->dhm_G, 16,
                                 POLARSSL_DHM_RFC5114_MODP_1024_G) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "mpi_read_string", ret );
        return( ret );
    }
#endif

    /*
     * Prepare base structures
     */
    ssl->in_ctr = (unsigned char *) malloc( len );
    ssl->in_hdr = ssl->in_ctr +  8;
    ssl->in_msg = ssl->in_ctr + 13;

    if( ssl->in_ctr == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    ssl->out_ctr = (unsigned char *) malloc( len );
    ssl->out_hdr = ssl->out_ctr +  8;
    ssl->out_msg = ssl->out_ctr + 40;

    if( ssl->out_ctr == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
        free( ssl->in_ctr );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memset( ssl->in_ctr, 0, SSL_BUFFER_LEN );
    memset( ssl->out_ctr, 0, SSL_BUFFER_LEN );

    ssl->hostname = NULL;
    ssl->hostname_len = 0;

    if( ( ret = ssl_handshake_init( ssl ) ) != 0 )
        return( ret );

    return( 0 );
}
DoS
0
int ssl_init( ssl_context *ssl )
{
    int ret;
    int len = SSL_BUFFER_LEN;

    memset( ssl, 0, sizeof( ssl_context ) );

    /*
     * Sane defaults
     */
    ssl->rsa_decrypt = ssl_rsa_decrypt;
    ssl->rsa_sign = ssl_rsa_sign;
    ssl->rsa_key_len = ssl_rsa_key_len;

    ssl->min_major_ver = SSL_MAJOR_VERSION_3;
    ssl->min_minor_ver = SSL_MINOR_VERSION_0;

    ssl->ciphersuites = malloc( sizeof(int *) * 4 );
    ssl_set_ciphersuites( ssl, ssl_default_ciphersuites );

#if defined(POLARSSL_DHM_C)
    if( ( ret = mpi_read_string( &ssl->dhm_P, 16,
                                 POLARSSL_DHM_RFC5114_MODP_1024_P) ) != 0 ||
        ( ret = mpi_read_string( &ssl->dhm_G, 16,
                                 POLARSSL_DHM_RFC5114_MODP_1024_G) ) != 0 )
    {
        SSL_DEBUG_RET( 1, "mpi_read_string", ret );
        return( ret );
    }
#endif

    /*
     * Prepare base structures
     */
    ssl->in_ctr = (unsigned char *) malloc( len );
    ssl->in_hdr = ssl->in_ctr +  8;
    ssl->in_msg = ssl->in_ctr + 13;

    if( ssl->in_ctr == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    ssl->out_ctr = (unsigned char *) malloc( len );
    ssl->out_hdr = ssl->out_ctr +  8;
    ssl->out_msg = ssl->out_ctr + 40;

    if( ssl->out_ctr == NULL )
    {
        SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
        free( ssl->in_ctr );
        return( POLARSSL_ERR_SSL_MALLOC_FAILED );
    }

    memset( ssl->in_ctr, 0, SSL_BUFFER_LEN );
    memset( ssl->out_ctr, 0, SSL_BUFFER_LEN );

    ssl->hostname = NULL;
    ssl->hostname_len = 0;

    if( ( ret = ssl_handshake_init( ssl ) ) != 0 )
        return( ret );

    return( 0 );
}
@@ -2375,8 +2375,8 @@ int ssl_parse_certificate( ssl_context *ssl )
             return( POLARSSL_ERR_SSL_BAD_HS_CERTIFICATE );
         }
 
-        ret = x509parse_crt( ssl->session_negotiate->peer_cert, ssl->in_msg + i,
-                             n );
+        ret = x509parse_crt_der( ssl->session_negotiate->peer_cert,
+                                 ssl->in_msg + i, n );
         if( ret != 0 )
         {
             SSL_DEBUG_RET( 1, " x509parse_crt", ret );
CWE-20
null
null
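One detail worth flagging in ssl_init: in_hdr and in_msg (and likewise out_hdr/out_msg) are derived from the freshly malloc'd base pointer before the NULL check, so a failed allocation performs pointer arithmetic on NULL, which is undefined behavior in C, before the error path is ever reached. A safer ordering is to check first and derive afterwards; the sketch below rearranges the lines shown above and is not the shipped code.

```c
ssl->in_ctr = (unsigned char *) malloc( len );
if( ssl->in_ctr == NULL )
{
    SSL_DEBUG_MSG( 1, ( "malloc(%d bytes) failed", len ) );
    return( POLARSSL_ERR_SSL_MALLOC_FAILED );
}

/* Only derive interior pointers once the allocation is known good. */
ssl->in_hdr = ssl->in_ctr +  8;   /* record header at offset 8        */
ssl->in_msg = ssl->in_ctr + 13;   /* payload starts after the header  */
```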