| idx int64 | func_before string | Vulnerability Classification string | vul int64 | func_after string | patch string | CWE ID string | lines_before string | lines_after string |
|---|---|---|---|---|---|---|---|---|
26,700 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Address WBINVD may be executed by guest */
if (need_emulate_wbinvd(vcpu)) {
if (kvm_x86_ops->has_wbinvd_exit())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
kvm_x86_ops->vcpu_load(vcpu, cpu);
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
vcpu->arch.tsc_offset_adjustment = 0;
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
}
if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
*/
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
accumulate_steal_time(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
| DoS Mem. Corr. | 0 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
/* Address WBINVD may be executed by guest */
if (need_emulate_wbinvd(vcpu)) {
if (kvm_x86_ops->has_wbinvd_exit())
cpumask_set_cpu(cpu, vcpu->arch.wbinvd_dirty_mask);
else if (vcpu->cpu != -1 && vcpu->cpu != cpu)
smp_call_function_single(vcpu->cpu,
wbinvd_ipi, NULL, 1);
}
kvm_x86_ops->vcpu_load(vcpu, cpu);
/* Apply any externally detected TSC adjustments (due to suspend) */
if (unlikely(vcpu->arch.tsc_offset_adjustment)) {
adjust_tsc_offset_host(vcpu, vcpu->arch.tsc_offset_adjustment);
vcpu->arch.tsc_offset_adjustment = 0;
set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
}
if (unlikely(vcpu->cpu != cpu) || check_tsc_unstable()) {
s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
native_read_tsc() - vcpu->arch.last_host_tsc;
if (tsc_delta < 0)
mark_tsc_unstable("KVM discovered backwards TSC");
if (check_tsc_unstable()) {
u64 offset = kvm_x86_ops->compute_tsc_offset(vcpu,
vcpu->arch.last_guest_tsc);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
vcpu->arch.tsc_catchup = 1;
}
/*
* On a host with synchronized TSC, there is no need to update
* kvmclock on vcpu->cpu migration
*/
if (!vcpu->kvm->arch.use_master_clock || vcpu->cpu == -1)
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
if (vcpu->cpu != cpu)
kvm_migrate_timers(vcpu);
vcpu->cpu = cpu;
}
accumulate_steal_time(vcpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,701 | long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
/*
* This union makes it completely explicit to gcc-3.x
* that these two variables' stack usage should be
* combined, not added together.
*/
union {
struct kvm_pit_state ps;
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
r = -EFAULT;
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_CREATE_IRQCHIP: {
struct kvm_pic *vpic;
mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -EINVAL;
if (atomic_read(&kvm->online_vcpus))
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_master);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_slave);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_eclr);
mutex_unlock(&kvm->slots_lock);
kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
smp_wmb();
kvm->arch.vpic = vpic;
smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
mutex_unlock(&kvm->slots_lock);
}
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
case KVM_CREATE_PIT:
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
goto create_pit;
case KVM_CREATE_PIT2:
r = -EFAULT;
if (copy_from_user(&u.pit_config, argp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
mutex_unlock(&kvm->slots_lock);
break;
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
goto get_irqchip_out;
r = -EFAULT;
if (copy_to_user(argp, chip, sizeof *chip))
goto get_irqchip_out;
r = 0;
get_irqchip_out:
kfree(chip);
break;
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
goto set_irqchip_out;
r = 0;
set_irqchip_out:
kfree(chip);
break;
}
case KVM_GET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
break;
}
case KVM_GET_PIT2: {
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT2: {
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
break;
}
case KVM_REINJECT_CONTROL: {
struct kvm_reinject_control control;
r = -EFAULT;
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
sizeof(struct kvm_xen_hvm_config)))
goto out;
r = -EINVAL;
if (kvm->arch.xen_hvm_config.flags)
goto out;
r = 0;
break;
}
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
goto out;
r = -EINVAL;
if (user_ns.flags)
goto out;
r = 0;
local_irq_disable();
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
local_irq_disable();
now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
goto out;
r = 0;
break;
}
default:
;
}
out:
return r;
}
| DoS Mem. Corr. | 0 | long kvm_arch_vm_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
int r = -ENOTTY;
/*
* This union makes it completely explicit to gcc-3.x
* that these two variables' stack usage should be
* combined, not added together.
*/
union {
struct kvm_pit_state ps;
struct kvm_pit_state2 ps2;
struct kvm_pit_config pit_config;
} u;
switch (ioctl) {
case KVM_SET_TSS_ADDR:
r = kvm_vm_ioctl_set_tss_addr(kvm, arg);
break;
case KVM_SET_IDENTITY_MAP_ADDR: {
u64 ident_addr;
r = -EFAULT;
if (copy_from_user(&ident_addr, argp, sizeof ident_addr))
goto out;
r = kvm_vm_ioctl_set_identity_map_addr(kvm, ident_addr);
break;
}
case KVM_SET_NR_MMU_PAGES:
r = kvm_vm_ioctl_set_nr_mmu_pages(kvm, arg);
break;
case KVM_GET_NR_MMU_PAGES:
r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
break;
case KVM_CREATE_IRQCHIP: {
struct kvm_pic *vpic;
mutex_lock(&kvm->lock);
r = -EEXIST;
if (kvm->arch.vpic)
goto create_irqchip_unlock;
r = -EINVAL;
if (atomic_read(&kvm->online_vcpus))
goto create_irqchip_unlock;
r = -ENOMEM;
vpic = kvm_create_pic(kvm);
if (vpic) {
r = kvm_ioapic_init(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_master);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_slave);
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
&vpic->dev_eclr);
mutex_unlock(&kvm->slots_lock);
kfree(vpic);
goto create_irqchip_unlock;
}
} else
goto create_irqchip_unlock;
smp_wmb();
kvm->arch.vpic = vpic;
smp_wmb();
r = kvm_setup_default_irq_routing(kvm);
if (r) {
mutex_lock(&kvm->slots_lock);
mutex_lock(&kvm->irq_lock);
kvm_ioapic_destroy(kvm);
kvm_destroy_pic(kvm);
mutex_unlock(&kvm->irq_lock);
mutex_unlock(&kvm->slots_lock);
}
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
}
case KVM_CREATE_PIT:
u.pit_config.flags = KVM_PIT_SPEAKER_DUMMY;
goto create_pit;
case KVM_CREATE_PIT2:
r = -EFAULT;
if (copy_from_user(&u.pit_config, argp,
sizeof(struct kvm_pit_config)))
goto out;
create_pit:
mutex_lock(&kvm->slots_lock);
r = -EEXIST;
if (kvm->arch.vpit)
goto create_pit_unlock;
r = -ENOMEM;
kvm->arch.vpit = kvm_create_pit(kvm, u.pit_config.flags);
if (kvm->arch.vpit)
r = 0;
create_pit_unlock:
mutex_unlock(&kvm->slots_lock);
break;
case KVM_GET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto get_irqchip_out;
r = kvm_vm_ioctl_get_irqchip(kvm, chip);
if (r)
goto get_irqchip_out;
r = -EFAULT;
if (copy_to_user(argp, chip, sizeof *chip))
goto get_irqchip_out;
r = 0;
get_irqchip_out:
kfree(chip);
break;
}
case KVM_SET_IRQCHIP: {
/* 0: PIC master, 1: PIC slave, 2: IOAPIC */
struct kvm_irqchip *chip;
chip = memdup_user(argp, sizeof(*chip));
if (IS_ERR(chip)) {
r = PTR_ERR(chip);
goto out;
}
r = -ENXIO;
if (!irqchip_in_kernel(kvm))
goto set_irqchip_out;
r = kvm_vm_ioctl_set_irqchip(kvm, chip);
if (r)
goto set_irqchip_out;
r = 0;
set_irqchip_out:
kfree(chip);
break;
}
case KVM_GET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT: {
r = -EFAULT;
if (copy_from_user(&u.ps, argp, sizeof u.ps))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
break;
}
case KVM_GET_PIT2: {
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_get_pit2(kvm, &u.ps2);
if (r)
goto out;
r = -EFAULT;
if (copy_to_user(argp, &u.ps2, sizeof(u.ps2)))
goto out;
r = 0;
break;
}
case KVM_SET_PIT2: {
r = -EFAULT;
if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
goto out;
r = -ENXIO;
if (!kvm->arch.vpit)
goto out;
r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
break;
}
case KVM_REINJECT_CONTROL: {
struct kvm_reinject_control control;
r = -EFAULT;
if (copy_from_user(&control, argp, sizeof(control)))
goto out;
r = kvm_vm_ioctl_reinject(kvm, &control);
break;
}
case KVM_XEN_HVM_CONFIG: {
r = -EFAULT;
if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
sizeof(struct kvm_xen_hvm_config)))
goto out;
r = -EINVAL;
if (kvm->arch.xen_hvm_config.flags)
goto out;
r = 0;
break;
}
case KVM_SET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
s64 delta;
r = -EFAULT;
if (copy_from_user(&user_ns, argp, sizeof(user_ns)))
goto out;
r = -EINVAL;
if (user_ns.flags)
goto out;
r = 0;
local_irq_disable();
now_ns = get_kernel_ns();
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
break;
}
case KVM_GET_CLOCK: {
struct kvm_clock_data user_ns;
u64 now_ns;
local_irq_disable();
now_ns = get_kernel_ns();
user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
local_irq_enable();
user_ns.flags = 0;
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;
if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
goto out;
r = 0;
break;
}
default:
;
}
out:
return r;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,702 | int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
| DoS Mem. Corr. | 0 | int kvm_dev_ioctl_check_extension(long ext)
{
int r;
switch (ext) {
case KVM_CAP_IRQCHIP:
case KVM_CAP_HLT:
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
case KVM_CAP_SYNC_MMU:
case KVM_CAP_USER_NMI:
case KVM_CAP_REINJECT_CONTROL:
case KVM_CAP_IRQ_INJECT_STATUS:
case KVM_CAP_ASSIGN_DEV_IRQ:
case KVM_CAP_IRQFD:
case KVM_CAP_IOEVENTFD:
case KVM_CAP_PIT2:
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
case KVM_CAP_HYPERV_SPIN:
case KVM_CAP_PCI_SEGMENT:
case KVM_CAP_DEBUGREGS:
case KVM_CAP_X86_ROBUST_SINGLESTEP:
case KVM_CAP_XSAVE:
case KVM_CAP_ASYNC_PF:
case KVM_CAP_GET_TSC_KHZ:
case KVM_CAP_PCI_2_3:
case KVM_CAP_KVMCLOCK_CTRL:
case KVM_CAP_READONLY_MEM:
case KVM_CAP_IRQFD_RESAMPLE:
r = 1;
break;
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_VAPIC:
r = !kvm_x86_ops->cpu_has_accelerated_tpr();
break;
case KVM_CAP_NR_VCPUS:
r = KVM_SOFT_MAX_VCPUS;
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
break;
case KVM_CAP_NR_MEMSLOTS:
r = KVM_USER_MEM_SLOTS;
break;
case KVM_CAP_PV_MMU: /* obsolete */
r = 0;
break;
case KVM_CAP_IOMMU:
r = iommu_present(&pci_bus_type);
break;
case KVM_CAP_MCE:
r = KVM_MAX_MCE_BANKS;
break;
case KVM_CAP_XCRS:
r = cpu_has_xsave;
break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
case KVM_CAP_TSC_DEADLINE_TIMER:
r = boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER);
break;
default:
r = 0;
break;
}
return r;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,703 | static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
/* Bits 2:5 are reserved, Should be zero */
if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
if (!(data & KVM_ASYNC_PF_ENABLED)) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 0;
}
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
| DoS Mem. Corr. | 0 | static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
{
gpa_t gpa = data & ~0x3f;
/* Bits 2:5 are reserved, Should be zero */
if (data & 0x3c)
return 1;
vcpu->arch.apf.msr_val = data;
if (!(data & KVM_ASYNC_PF_ENABLED)) {
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
return 0;
}
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
kvm_async_pf_wakeup_all(vcpu);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,704 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_mmu_flush_tlb(vcpu);
return 0;
}
if (is_long_mode(vcpu)) {
if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
if (cr3 & CR3_L_MODE_RESERVED_BITS)
return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
return 1;
if (is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
}
/*
* We don't check reserved bits in nonpae mode, because
* this isn't enforced, and VMware depends on this.
*/
}
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
* cause trouble later on when we turn on paging anyway.)
*
* A real CPU would silently accept an invalid cr3 and would
* attempt to use it - with largely undefined (and often hard
* to debug) behavior on the guest side.
*/
if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
return 1;
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu->arch.mmu.new_cr3(vcpu);
return 0;
}
| DoS Mem. Corr. | 0 | int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
{
if (cr3 == kvm_read_cr3(vcpu) && !pdptrs_changed(vcpu)) {
kvm_mmu_sync_roots(vcpu);
kvm_mmu_flush_tlb(vcpu);
return 0;
}
if (is_long_mode(vcpu)) {
if (kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)) {
if (cr3 & CR3_PCID_ENABLED_RESERVED_BITS)
return 1;
} else
if (cr3 & CR3_L_MODE_RESERVED_BITS)
return 1;
} else {
if (is_pae(vcpu)) {
if (cr3 & CR3_PAE_RESERVED_BITS)
return 1;
if (is_paging(vcpu) &&
!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))
return 1;
}
/*
* We don't check reserved bits in nonpae mode, because
* this isn't enforced, and VMware depends on this.
*/
}
/*
* Does the new cr3 value map to physical memory? (Note, we
* catch an invalid cr3 even in real-mode, because it would
* cause trouble later on when we turn on paging anyway.)
*
* A real CPU would silently accept an invalid cr3 and would
* attempt to use it - with largely undefined (and often hard
* to debug) behavior on the guest side.
*/
if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
return 1;
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu->arch.mmu.new_cr3(vcpu);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,705 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
return 1;
if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
return 1;
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
if (!guest_cpuid_has_pcid(vcpu))
return 1;
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
return 1;
}
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
if (((cr4 ^ old_cr4) & pdptr_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
return 0;
}
| DoS Mem. Corr. | 0 | int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
unsigned long old_cr4 = kvm_read_cr4(vcpu);
unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
X86_CR4_PAE | X86_CR4_SMEP;
if (cr4 & CR4_RESERVED_BITS)
return 1;
if (!guest_cpuid_has_xsave(vcpu) && (cr4 & X86_CR4_OSXSAVE))
return 1;
if (!guest_cpuid_has_smep(vcpu) && (cr4 & X86_CR4_SMEP))
return 1;
if (!guest_cpuid_has_fsgsbase(vcpu) && (cr4 & X86_CR4_RDWRGSFS))
return 1;
if (is_long_mode(vcpu)) {
if (!(cr4 & X86_CR4_PAE))
return 1;
} else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
&& ((cr4 ^ old_cr4) & pdptr_bits)
&& !load_pdptrs(vcpu, vcpu->arch.walk_mmu,
kvm_read_cr3(vcpu)))
return 1;
if ((cr4 & X86_CR4_PCIDE) && !(old_cr4 & X86_CR4_PCIDE)) {
if (!guest_cpuid_has_pcid(vcpu))
return 1;
/* PCID can not be enabled when cr3[11:0]!=000H or EFER.LMA=0 */
if ((kvm_read_cr3(vcpu) & X86_CR3_PCID_MASK) || !is_long_mode(vcpu))
return 1;
}
if (kvm_x86_ops->set_cr4(vcpu, cr4))
return 1;
if (((cr4 ^ old_cr4) & pdptr_bits) ||
(!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
kvm_mmu_reset_context(vcpu);
if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
kvm_update_cpuid(vcpu);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,706 | static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
u32 thresh_lo, thresh_hi;
int use_scaling = 0;
/* Compute a scale to convert nanoseconds in TSC cycles */
kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
&vcpu->arch.virtual_tsc_shift,
&vcpu->arch.virtual_tsc_mult);
vcpu->arch.virtual_tsc_khz = this_tsc_khz;
/*
* Compute the variation in TSC rate which is acceptable
* within the range of tolerance and decide if the
* rate being applied is within that bounds of the hardware
* rate. If so, no scaling or compensation need be done.
*/
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
use_scaling = 1;
}
kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}
| DoS Mem. Corr. | 0 | static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
{
u32 thresh_lo, thresh_hi;
int use_scaling = 0;
/* Compute a scale to convert nanoseconds in TSC cycles */
kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
&vcpu->arch.virtual_tsc_shift,
&vcpu->arch.virtual_tsc_mult);
vcpu->arch.virtual_tsc_khz = this_tsc_khz;
/*
* Compute the variation in TSC rate which is acceptable
* within the range of tolerance and decide if the
* rate being applied is within that bounds of the hardware
* rate. If so, no scaling or compensation need be done.
*/
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
use_scaling = 1;
}
kvm_x86_ops->set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,707 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
kvm_queue_interrupt(vcpu, irq->irq, false);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
| DoS Mem. Corr. | 0 | static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
struct kvm_interrupt *irq)
{
if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
return -EINVAL;
if (irqchip_in_kernel(vcpu->kvm))
return -ENXIO;
kvm_queue_interrupt(vcpu, irq->irq, false);
kvm_make_request(KVM_REQ_EVENT, vcpu);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,708 | static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
kvm_pmu_reset(vcpu);
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
return kvm_x86_ops->vcpu_reset(vcpu);
}
| DoS Mem. Corr. | 0 | static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
{
atomic_set(&vcpu->arch.nmi_queued, 0);
vcpu->arch.nmi_pending = 0;
vcpu->arch.nmi_injected = false;
memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
vcpu->arch.dr6 = DR6_FIXED_1;
vcpu->arch.dr7 = DR7_FIXED_1;
kvm_update_dr7(vcpu);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.apf.msr_val = 0;
vcpu->arch.st.msr_val = 0;
kvmclock_reset(vcpu);
kvm_clear_async_pf_completion_queue(vcpu);
kvm_async_pf_hash_reset(vcpu);
vcpu->arch.apf.halted = false;
kvm_pmu_reset(vcpu);
memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));
vcpu->arch.regs_avail = ~0;
vcpu->arch.regs_dirty = ~0;
return kvm_x86_ops->vcpu_reset(vcpu);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
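Note on the patch field above: it drops the kmap_atomic()-based access to the kvmclock page in favour of KVM's cached guest-memory helpers. The following is only a minimal sketch of that calling pattern, assuming kernel context (<linux/kvm_host.h>) and the three-argument kvm_gfn_to_hva_cache_init() of this era; the my_-prefixed functions are hypothetical and merely illustrate the sequence of init, cached read, and cached write.

/* Sketch only: mirrors the call pattern introduced by the patch,
 * not the exact kernel code. */
static int my_enable_pv_time(struct kvm_vcpu *vcpu, u64 data)
{
	/* bit 0 of the MSR value is the enable flag, the rest is the GPA */
	if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_time,
				      data & ~1ULL)) {
		vcpu->arch.pv_time_enabled = false;	/* GPA has no memslot */
		return -EINVAL;
	}
	vcpu->arch.pv_time_enabled = true;
	return 0;
}

static int my_refresh_pv_time(struct kvm_vcpu *v)
{
	struct pvclock_vcpu_time_info hv;

	if (!v->arch.pv_time_enabled)
		return 0;
	/* cached read/write replaces kmap_atomic() + memcpy() + mark_page_dirty() */
	if (kvm_read_guest_cached(v->kvm, &v->arch.pv_time, &hv, sizeof(hv)))
		return 0;
	hv.version += 2;	/* guest expects an even version after an update */
	return kvm_write_guest_cached(v->kvm, &v->arch.pv_time, &hv, sizeof(hv));
}
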
26,709 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event->irq, irq_event->level);
return 0;
}
| DoS Mem. Corr. | 0 | int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
{
if (!irqchip_in_kernel(kvm))
return -ENXIO;
irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event->irq, irq_event->level);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,710 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
s64 usdiff;
bool matched;
u64 data = msr->data;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
/* n.b - signed multiplication and division required */
usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
/* do_div() only does unsigned */
asm("idivl %2; xor %%edx, %%edx"
: "=A"(usdiff)
: "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
#endif
do_div(elapsed, 1000);
usdiff -= elapsed;
if (usdiff < 0)
usdiff = -usdiff;
/*
* Special case: TSC write with a small delta (1 second) of virtual
* cycle time against real time is interpreted as an attempt to
* synchronize the CPU.
*
* For a reliable TSC, we can match TSC offsets, and for an unstable
* TSC, we add elapsed time in this computation. We could let the
* compensation code attempt to catch up if we fall behind, but
* it's better to try to match offsets from the beginning.
*/
if (usdiff < USEC_PER_SEC &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
if (!check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
matched = true;
} else {
/*
* We split periods of matched TSC writes into generations.
* For each generation, we track the original measured
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
* exact software computation in compute_guest_tsc()
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
kvm->arch.cur_tsc_generation++;
kvm->arch.cur_tsc_nsec = ns;
kvm->arch.cur_tsc_write = data;
kvm->arch.cur_tsc_offset = offset;
matched = false;
pr_debug("kvm: new tsc generation %u, clock %llu\n",
kvm->arch.cur_tsc_generation, data);
}
/*
* We also track the most recent recorded KHZ, write and time to
* allow the matching interval to be extended at each write.
*/
kvm->arch.last_tsc_nsec = ns;
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
vcpu->arch.last_guest_tsc = data;
/* Keep track of which generation this VCPU has synchronized to */
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
if (matched)
kvm->arch.nr_vcpus_matched_tsc++;
else
kvm->arch.nr_vcpus_matched_tsc = 0;
kvm_track_tsc_matching(vcpu);
spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
| DoS Mem. Corr. | 0 | void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
struct kvm *kvm = vcpu->kvm;
u64 offset, ns, elapsed;
unsigned long flags;
s64 usdiff;
bool matched;
u64 data = msr->data;
raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
ns = get_kernel_ns();
elapsed = ns - kvm->arch.last_tsc_nsec;
/* n.b - signed multiplication and division required */
usdiff = data - kvm->arch.last_tsc_write;
#ifdef CONFIG_X86_64
usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
#else
/* do_div() only does unsigned */
asm("idivl %2; xor %%edx, %%edx"
: "=A"(usdiff)
: "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
#endif
do_div(elapsed, 1000);
usdiff -= elapsed;
if (usdiff < 0)
usdiff = -usdiff;
/*
* Special case: TSC write with a small delta (1 second) of virtual
* cycle time against real time is interpreted as an attempt to
* synchronize the CPU.
*
* For a reliable TSC, we can match TSC offsets, and for an unstable
* TSC, we add elapsed time in this computation. We could let the
* compensation code attempt to catch up if we fall behind, but
* it's better to try to match offsets from the beginning.
*/
if (usdiff < USEC_PER_SEC &&
vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
if (!check_tsc_unstable()) {
offset = kvm->arch.cur_tsc_offset;
pr_debug("kvm: matched tsc offset for %llu\n", data);
} else {
u64 delta = nsec_to_cycles(vcpu, elapsed);
data += delta;
offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
}
matched = true;
} else {
/*
* We split periods of matched TSC writes into generations.
* For each generation, we track the original measured
* nanosecond time, offset, and write, so if TSCs are in
* sync, we can match exact offset, and if not, we can match
* exact software computation in compute_guest_tsc()
*
* These values are tracked in kvm->arch.cur_xxx variables.
*/
kvm->arch.cur_tsc_generation++;
kvm->arch.cur_tsc_nsec = ns;
kvm->arch.cur_tsc_write = data;
kvm->arch.cur_tsc_offset = offset;
matched = false;
pr_debug("kvm: new tsc generation %u, clock %llu\n",
kvm->arch.cur_tsc_generation, data);
}
/*
* We also track the most recent recorded KHZ, write and time to
* allow the matching interval to be extended at each write.
*/
kvm->arch.last_tsc_nsec = ns;
kvm->arch.last_tsc_write = data;
kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
/* Reset of TSC must disable overshoot protection below */
vcpu->arch.hv_clock.tsc_timestamp = 0;
vcpu->arch.last_guest_tsc = data;
/* Keep track of which generation this VCPU has synchronized to */
vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
update_ia32_tsc_adjust_msr(vcpu, offset);
kvm_x86_ops->write_tsc_offset(vcpu, offset);
raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
if (matched)
kvm->arch.nr_vcpus_matched_tsc++;
else
kvm->arch.nr_vcpus_matched_tsc = 0;
kvm_track_tsc_matching(vcpu);
spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
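kvm_write_tsc() above decides whether a guest TSC write is a synchronization attempt by converting the cycle delta to microseconds at virtual_tsc_khz and comparing it against the elapsed host time. A hedged numeric example with made-up values (kernel context assumed for the u64/s64 typedefs and USEC_PER_SEC) shows the units involved:

/* Hypothetical numbers only, to illustrate the arithmetic in kvm_write_tsc(). */
static bool my_is_sync_attempt(void)
{
	u64 virtual_tsc_khz = 2000000;	/* 2 GHz guest TSC (hypothetical) */
	s64 cycle_delta     = 900000000;	/* new MSR write minus last write */
	s64 elapsed_ns      = 500000000;	/* host ns since the last write */
	s64 usdiff, elapsed;

	usdiff  = (cycle_delta * 1000) / virtual_tsc_khz;	/* 450000 us */
	elapsed = elapsed_ns / 1000;				/* 500000 us */
	usdiff -= elapsed;					/* -50000 */
	if (usdiff < 0)
		usdiff = -usdiff;

	/* 50000 < 1000000: treated as a sync attempt, so the existing
	 * offset (or a catch-up delta) is reused instead of starting a
	 * new TSC generation */
	return usdiff < USEC_PER_SEC;
}
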
26,711 | static void pvclock_gtod_update_fn(struct work_struct *work)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
atomic_set(&kvm_guest_has_master_clock, 0);
raw_spin_unlock(&kvm_lock);
}
| DoS Mem. Corr. | 0 | static void pvclock_gtod_update_fn(struct work_struct *work)
{
struct kvm *kvm;
struct kvm_vcpu *vcpu;
int i;
raw_spin_lock(&kvm_lock);
list_for_each_entry(kvm, &vm_list, vm_list)
kvm_for_each_vcpu(i, vcpu, kvm)
set_bit(KVM_REQ_MASTERCLOCK_UPDATE, &vcpu->requests);
atomic_set(&kvm_guest_has_master_clock, 0);
raw_spin_unlock(&kvm_lock);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,712 | static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
bool write_fault_to_shadow_pgtable)
{
gpa_t gpa = cr2;
pfn_t pfn;
if (!vcpu->arch.mmu.direct_map) {
/*
* Write permission should be allowed since only
* write access needs to be emulated.
*/
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
/*
* If the mapping is invalid in the guest, let the CPU retry
* it to generate a fault.
*/
if (gpa == UNMAPPED_GVA)
return true;
}
/*
* Do not retry the unhandleable instruction if it faults on the
* read-only host memory, otherwise it will go into an infinite loop:
* retry instruction -> write #PF -> emulation fail -> retry
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the instruction failed on the error pfn, it can not be fixed,
* report the error to userspace.
*/
if (is_error_noslot_pfn(pfn))
return false;
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
if (vcpu->arch.mmu.direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
spin_unlock(&vcpu->kvm->mmu_lock);
if (indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
/*
* if emulation was due to access to shadowed page table
* and it failed try to unshadow page and re-enter the
* guest to let CPU execute the instruction.
*/
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the access faults on its page table, it can not
* be fixed by unprotecting shadow page and it should
* be reported to userspace.
*/
return !write_fault_to_shadow_pgtable;
}
| DoS Mem. Corr. | 0 | static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
bool write_fault_to_shadow_pgtable)
{
gpa_t gpa = cr2;
pfn_t pfn;
if (!vcpu->arch.mmu.direct_map) {
/*
* Write permission should be allowed since only
* write access needs to be emulated.
*/
gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
/*
* If the mapping is invalid in the guest, let the CPU retry
* it to generate a fault.
*/
if (gpa == UNMAPPED_GVA)
return true;
}
/*
* Do not retry the unhandleable instruction if it faults on the
* read-only host memory, otherwise it will go into an infinite loop:
* retry instruction -> write #PF -> emulation fail -> retry
* instruction -> ...
*/
pfn = gfn_to_pfn(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the instruction failed on the error pfn, it can not be fixed,
* report the error to userspace.
*/
if (is_error_noslot_pfn(pfn))
return false;
kvm_release_pfn_clean(pfn);
/* The instructions are well-emulated on direct mmu. */
if (vcpu->arch.mmu.direct_map) {
unsigned int indirect_shadow_pages;
spin_lock(&vcpu->kvm->mmu_lock);
indirect_shadow_pages = vcpu->kvm->arch.indirect_shadow_pages;
spin_unlock(&vcpu->kvm->mmu_lock);
if (indirect_shadow_pages)
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
return true;
}
/*
* if emulation was due to access to shadowed page table
* and it failed try to unshadow page and re-enter the
* guest to let CPU execute the instruction.
*/
kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(gpa));
/*
* If the access faults on its page table, it can not
* be fixed by unprotecting shadow page and it should
* be reported to userspace.
*/
return !write_fault_to_shadow_pgtable;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,713 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
| DoS Mem. Corr. | 0 | static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
u64 old_efer = vcpu->arch.efer;
if (efer & efer_reserved_bits)
return 1;
if (is_paging(vcpu)
&& (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
return 1;
if (efer & EFER_FFXSR) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
return 1;
}
if (efer & EFER_SVME) {
struct kvm_cpuid_entry2 *feat;
feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
return 1;
}
efer &= ~EFER_LMA;
efer |= vcpu->arch.efer & EFER_LMA;
kvm_x86_ops->set_efer(vcpu, efer);
/* Update reserved bits */
if ((efer ^ old_efer) & EFER_NX)
kvm_mmu_reset_context(vcpu);
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
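set_efer() above masks EFER.LMA out of the value being written and copies the bit back from the current vcpu->arch.efer, so a guest write can never toggle long-mode-active directly. A small userspace sketch with hypothetical register values (EFER.LMA is architecturally bit 10):

#include <stdio.h>
#include <stdint.h>

#define EFER_LMA (1ULL << 10)	/* long mode active */

int main(void)
{
	uint64_t current_efer = 0x500;	/* hypothetical: LME | LMA already set */
	uint64_t written_efer = 0x901;	/* hypothetical guest write, LMA clear */

	written_efer &= ~EFER_LMA;			/* drop whatever the guest put there */
	written_efer |= current_efer & EFER_LMA;	/* keep the real LMA state */
	printf("effective EFER = %#llx\n", (unsigned long long)written_efer);
	return 0;	/* prints 0xd01: LMA survives the write */
}
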
26,714 | static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
{
u64 eoi_exit_bitmap[4];
memset(eoi_exit_bitmap, 0, 32);
kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
| DoS Mem. Corr. | 0 | static void update_eoi_exitmap(struct kvm_vcpu *vcpu)
{
u64 eoi_exit_bitmap[4];
memset(eoi_exit_bitmap, 0, 32);
kvm_ioapic_calculate_eoi_exitmap(vcpu, eoi_exit_bitmap);
kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
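In update_eoi_exitmap() above, the literal 32 passed to memset() is simply the size of the four-element u64 array, i.e. a 256-bit bitmap with one bit per interrupt vector. A hedged equivalent written with sizeof (userspace stand-in for the kernel typedef assumed):

#include <string.h>
#include <stdint.h>

typedef uint64_t u64;	/* stand-in for the kernel typedef */

void clear_eoi_exitmap(u64 eoi_exit_bitmap[4])
{
	/* 4 * 8 bytes = 32 bytes = 256 bits, covering vectors 0..255 */
	memset(eoi_exit_bitmap, 0, 4 * sizeof(u64));
}
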
26,715 | static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
}
| DoS Mem. Corr. | 0 | static int vapic_enter(struct kvm_vcpu *vcpu)
{
struct kvm_lapic *apic = vcpu->arch.apic;
struct page *page;
if (!apic || !apic->vapic_addr)
return 0;
page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
if (is_error_page(page))
return -EFAULT;
vcpu->arch.apic->vapic_page = page;
return 0;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,716 | int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2,
write_fault_to_spt))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for the VMware backdoor interface to work since it
changes register values during IO operations */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
| DoS Mem. Corr. | 0 | int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
void *insn,
int insn_len)
{
int r;
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
bool writeback = true;
bool write_fault_to_spt = vcpu->arch.write_fault_to_shadow_pgtable;
/*
* Clear write_fault_to_shadow_pgtable here to ensure it is
* never reused.
*/
vcpu->arch.write_fault_to_shadow_pgtable = false;
kvm_clear_exception_queue(vcpu);
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
ctxt->only_vendor_specific_insn
= emulation_type & EMULTYPE_TRAP_UD;
r = x86_decode_insn(ctxt, insn, insn_len);
trace_kvm_emulate_insn_start(vcpu);
++vcpu->stat.insn_emulation;
if (r != EMULATION_OK) {
if (emulation_type & EMULTYPE_TRAP_UD)
return EMULATE_FAIL;
if (reexecute_instruction(vcpu, cr2,
write_fault_to_spt))
return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
}
}
if (emulation_type & EMULTYPE_SKIP) {
kvm_rip_write(vcpu, ctxt->_eip);
return EMULATE_DONE;
}
if (retry_instruction(ctxt, cr2, emulation_type))
return EMULATE_DONE;
/* this is needed for the VMware backdoor interface to work since it
changes register values during IO operations */
if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
emulator_invalidate_register_cache(ctxt);
}
restart:
r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED)
return EMULATE_DONE;
if (r == EMULATION_FAILED) {
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
return EMULATE_DONE;
return handle_emulation_failure(vcpu);
}
if (ctxt->have_exception) {
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
if (!vcpu->arch.pio.in)
vcpu->arch.pio.count = 0;
else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
r = EMULATE_DO_MMIO;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
r = EMULATE_DO_MMIO;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
else
r = EMULATE_DONE;
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
return r;
}
| @@ -1406,10 +1406,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
- void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
- struct pvclock_vcpu_time_info *guest_hv_clock;
+ struct pvclock_vcpu_time_info guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
@@ -1463,7 +1462,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
local_irq_restore(flags);
- if (!vcpu->time_page)
+ if (!vcpu->pv_time_enabled)
return 0;
/*
@@ -1525,12 +1524,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
*/
vcpu->hv_clock.version += 2;
- shared_kaddr = kmap_atomic(vcpu->time_page);
-
- guest_hv_clock = shared_kaddr + vcpu->time_offset;
+ if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+ &guest_hv_clock, sizeof(guest_hv_clock))))
+ return 0;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
- pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+ pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1542,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.flags = pvclock_flags;
- memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
- sizeof(vcpu->hv_clock));
-
- kunmap_atomic(shared_kaddr);
-
- mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock));
return 0;
}
@@ -1837,10 +1833,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.time_page) {
- kvm_release_page_dirty(vcpu->arch.time_page);
- vcpu->arch.time_page = NULL;
- }
+ vcpu->arch.pv_time_enabled = false;
}
static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1940,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
break;
case MSR_KVM_SYSTEM_TIME_NEW:
case MSR_KVM_SYSTEM_TIME: {
+ u64 gpa_offset;
kvmclock_reset(vcpu);
vcpu->arch.time = data;
@@ -1956,19 +1950,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
if (!(data & 1))
break;
- /* ...but clean it before doing the actual write */
- vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ gpa_offset = data & ~(PAGE_MASK | 1);
/* Check that the address is 32-byte aligned. */
- if (vcpu->arch.time_offset &
- (sizeof(struct pvclock_vcpu_time_info) - 1))
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
break;
- vcpu->arch.time_page =
- gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
-
- if (is_error_page(vcpu->arch.time_page))
- vcpu->arch.time_page = NULL;
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.pv_time, data & ~1ULL))
+ vcpu->arch.pv_time_enabled = false;
+ else
+ vcpu->arch.pv_time_enabled = true;
break;
}
@@ -2972,7 +2964,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
*/
static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
{
- if (!vcpu->arch.time_page)
+ if (!vcpu->arch.pv_time_enabled)
return -EINVAL;
vcpu->arch.pvclock_set_guest_stopped_request = true;
kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -6723,6 +6715,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
goto fail_free_wbinvd_dirty_mask;
vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+ vcpu->arch.pv_time_enabled = false;
kvm_async_pf_hash_reset(vcpu);
kvm_pmu_init(vcpu);
| CWE-399 | null | null |
26,717 | static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info *guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->time_page)
return 0;
/*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
* possible measurements of the TSC correspond to one measurement of any
* other clock, and so a spread of values is possible. This is not a
* problem for the computation of the nanosecond clock; with TSC rates
* around 1GHZ, there can only be a few cycles which correspond to one
* nanosecond value, and any path through this code will inevitably
* take longer than that. However, with the kernel_ns value itself,
* the precision may be much lower, down to HZ granularity. If the
* first sampling of TSC against kernel_ns ends in the low part of the
* range, and the second in the high end of the range, we can get:
*
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
*
* As the sampling errors potentially range in the thousands of cycles,
* it is possible such a time value has already been observed by the
* guest. To protect against this, we must compute the system time as
* observed by the guest and ensure the new system time is greater.
*/
max_kernel_ns = 0;
if (vcpu->hv_clock.tsc_timestamp) {
max_kernel_ns = vcpu->last_guest_tsc -
vcpu->hv_clock.tsc_timestamp;
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
vcpu->hv_clock.tsc_to_system_mul,
vcpu->hv_clock.tsc_shift);
max_kernel_ns += vcpu->last_kernel_ns;
}
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/* with a master <monotonic time, tsc value> tuple,
* pvclock clock reads always increase at the (scaled) rate
* of guest TSC - no need to deal with sampling errors.
*/
if (!use_master_clock) {
if (max_kernel_ns > kernel_ns)
kernel_ns = max_kernel_ns;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
shared_kaddr = kmap_atomic(vcpu->time_page);
guest_hv_clock = shared_kaddr + vcpu->time_offset;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
kunmap_atomic(shared_kaddr);
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
return 0;
}
| DoS Overflow Mem. Corr. | 0 | static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
void *shared_kaddr;
s64 kernel_ns, max_kernel_ns;
u64 tsc_timestamp, host_tsc;
struct pvclock_vcpu_time_info *guest_hv_clock;
u8 pvclock_flags;
bool use_master_clock;
kernel_ns = 0;
host_tsc = 0;
/*
* If the host uses TSC clock, then passthrough TSC as stable
* to the guest.
*/
spin_lock(&ka->pvclock_gtod_sync_lock);
use_master_clock = ka->use_master_clock;
if (use_master_clock) {
host_tsc = ka->master_cycle_now;
kernel_ns = ka->master_kernel_ns;
}
spin_unlock(&ka->pvclock_gtod_sync_lock);
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
}
if (!use_master_clock) {
host_tsc = native_read_tsc();
kernel_ns = get_kernel_ns();
}
tsc_timestamp = kvm_x86_ops->read_l1_tsc(v, host_tsc);
/*
* We may have to catch up the TSC to match elapsed wall clock
* time for two reasons, even if kvmclock is used.
* 1) CPU could have been running below the maximum TSC rate
* 2) Broken TSC compensation resets the base at each VCPU
* entry to avoid unknown leaps of TSC even when running
* again on the same CPU. This may cause apparent elapsed
* time to disappear, and the guest to stand still or run
* very slowly.
*/
if (vcpu->tsc_catchup) {
u64 tsc = compute_guest_tsc(v, kernel_ns);
if (tsc > tsc_timestamp) {
adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
tsc_timestamp = tsc;
}
}
local_irq_restore(flags);
if (!vcpu->time_page)
return 0;
/*
* Time as measured by the TSC may go backwards when resetting the base
* tsc_timestamp. The reason for this is that the TSC resolution is
* higher than the resolution of the other clock scales. Thus, many
* possible measurements of the TSC correspond to one measurement of any
* other clock, and so a spread of values is possible. This is not a
* problem for the computation of the nanosecond clock; with TSC rates
* around 1GHZ, there can only be a few cycles which correspond to one
* nanosecond value, and any path through this code will inevitably
* take longer than that. However, with the kernel_ns value itself,
* the precision may be much lower, down to HZ granularity. If the
* first sampling of TSC against kernel_ns ends in the low part of the
* range, and the second in the high end of the range, we can get:
*
* (TSC - offset_low) * S + kns_old > (TSC - offset_high) * S + kns_new
*
* As the sampling errors potentially range in the thousands of cycles,
* it is possible such a time value has already been observed by the
* guest. To protect against this, we must compute the system time as
* observed by the guest and ensure the new system time is greater.
*/
max_kernel_ns = 0;
if (vcpu->hv_clock.tsc_timestamp) {
max_kernel_ns = vcpu->last_guest_tsc -
vcpu->hv_clock.tsc_timestamp;
max_kernel_ns = pvclock_scale_delta(max_kernel_ns,
vcpu->hv_clock.tsc_to_system_mul,
vcpu->hv_clock.tsc_shift);
max_kernel_ns += vcpu->last_kernel_ns;
}
if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
kvm_get_time_scale(NSEC_PER_SEC / 1000, this_tsc_khz,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
vcpu->hw_tsc_khz = this_tsc_khz;
}
/* with a master <monotonic time, tsc value> tuple,
* pvclock clock reads always increase at the (scaled) rate
* of guest TSC - no need to deal with sampling errors.
*/
if (!use_master_clock) {
if (max_kernel_ns > kernel_ns)
kernel_ns = max_kernel_ns;
}
/* With all the info we got, fill in the values */
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_kernel_ns = kernel_ns;
vcpu->last_guest_tsc = tsc_timestamp;
/*
* The interface expects us to write an even number signaling that the
* update is finished. Since the guest won't see the intermediate
* state, we just increase by 2 at the end.
*/
vcpu->hv_clock.version += 2;
shared_kaddr = kmap_atomic(vcpu->time_page);
guest_hv_clock = shared_kaddr + vcpu->time_offset;
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
if (vcpu->pvclock_set_guest_stopped_request) {
pvclock_flags |= PVCLOCK_GUEST_STOPPED;
vcpu->pvclock_set_guest_stopped_request = false;
}
/* If the host uses TSC clocksource, then it is stable */
if (use_master_clock)
pvclock_flags |= PVCLOCK_TSC_STABLE_BIT;
vcpu->hv_clock.flags = pvclock_flags;
memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
sizeof(vcpu->hv_clock));
kunmap_atomic(shared_kaddr);
mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
return 0;
}
| @@ -1959,6 +1959,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
/* ...but clean it before doing the actual write */
vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+ /* Check that the address is 32-byte aligned. */
+ if (vcpu->arch.time_offset &
+ (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
+
vcpu->arch.time_page =
gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
| CWE-119 | null | null |
26,718 | static int install_process_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret != -EEXIST ? ret : 0;
}
return commit_creds(new);
}
| DoS | 0 | static int install_process_keyring(void)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
return ret != -EEXIST ? ret : 0;
}
return commit_creds(new);
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,719 | int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return -EEXIST;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
| DoS | 0 | int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
return -EEXIST;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
new->process_keyring = keyring;
return 0;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,720 | static int install_session_keyring(struct key *keyring)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
| DoS | 0 | static int install_session_keyring(struct key *keyring)
{
struct cred *new;
int ret;
new = prepare_creds();
if (!new)
return -ENOMEM;
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
}
return commit_creds(new);
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,721 | int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
might_sleep();
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
atomic_inc(&keyring->usage);
}
/* install the keyring */
old = cred->session_keyring;
rcu_assign_pointer(cred->session_keyring, keyring);
if (old)
key_put(old);
return 0;
}
| DoS | 0 | int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
might_sleep();
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
atomic_inc(&keyring->usage);
}
/* install the keyring */
old = cred->session_keyring;
rcu_assign_pointer(cred->session_keyring, keyring);
if (old)
key_put(old);
return 0;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,722 | long join_session_keyring(const char *name)
{
const struct cred *old;
struct cred *new;
struct key *keyring;
long ret, serial;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
/* if no name is provided, install an anonymous keyring */
if (!name) {
ret = install_session_keyring_to_cred(new, NULL);
if (ret < 0)
goto error;
serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
goto okay;
}
/* allow the user to join or create a named keyring */
mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
keyring = keyring_alloc(
name, old->uid, old->gid, old,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
}
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
ret = 0;
goto error2;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
goto error2;
commit_creds(new);
mutex_unlock(&key_session_mutex);
ret = keyring->serial;
key_put(keyring);
okay:
return ret;
error2:
mutex_unlock(&key_session_mutex);
error:
abort_creds(new);
return ret;
}
| DoS | 0 | long join_session_keyring(const char *name)
{
const struct cred *old;
struct cred *new;
struct key *keyring;
long ret, serial;
new = prepare_creds();
if (!new)
return -ENOMEM;
old = current_cred();
/* if no name is provided, install an anonymous keyring */
if (!name) {
ret = install_session_keyring_to_cred(new, NULL);
if (ret < 0)
goto error;
serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
goto okay;
}
/* allow the user to join or create a named keyring */
mutex_lock(&key_session_mutex);
/* look for an existing keyring of this name */
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
keyring = keyring_alloc(
name, old->uid, old->gid, old,
KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
}
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
} else if (keyring == new->session_keyring) {
ret = 0;
goto error2;
}
/* we've got a keyring - now to install it */
ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0)
goto error2;
commit_creds(new);
mutex_unlock(&key_session_mutex);
ret = keyring->serial;
key_put(keyring);
okay:
return ret;
error2:
mutex_unlock(&key_session_mutex);
error:
abort_creds(new);
return ret;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,723 | void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
struct cred *new = container_of(twork, struct cred, rcu);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
}
new-> uid = old-> uid;
new-> euid = old-> euid;
new-> suid = old-> suid;
new->fsuid = old->fsuid;
new-> gid = old-> gid;
new-> egid = old-> egid;
new-> sgid = old-> sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
| DoS | 0 | void key_change_session_keyring(struct callback_head *twork)
{
const struct cred *old = current_cred();
struct cred *new = container_of(twork, struct cred, rcu);
if (unlikely(current->flags & PF_EXITING)) {
put_cred(new);
return;
}
new-> uid = old-> uid;
new-> euid = old-> euid;
new-> suid = old-> suid;
new->fsuid = old->fsuid;
new-> gid = old-> gid;
new-> egid = old-> egid;
new-> sgid = old-> sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
new->cap_inheritable = old->cap_inheritable;
new->cap_permitted = old->cap_permitted;
new->cap_effective = old->cap_effective;
new->cap_bset = old->cap_bset;
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,724 | void key_fsgid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
| DoS | 0 | void key_fsgid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,725 | void key_fsuid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
| DoS | 0 | void key_fsuid_changed(struct task_struct *tsk)
{
/* update the ownership of the thread keyring */
BUG_ON(!tsk->cred);
if (tsk->cred->thread_keyring) {
down_write(&tsk->cred->thread_keyring->sem);
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,726 | key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct request_key_auth *rka;
const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = cred->thread_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
if (!cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = cred->process_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
if (!cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
} else if (cred->session_keyring ==
cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
goto error;
goto reget_creds;
}
rcu_read_lock();
key = rcu_dereference(cred->session_keyring);
atomic_inc(&key->usage);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
if (!cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = cred->user->uid_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
if (!cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = cred->user->session_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
key = cred->request_key_auth;
if (!key)
goto error;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
if (!cred->request_key_auth)
goto error;
down_read(&cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
&cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
rka = cred->request_key_auth->payload.data;
key = rka->dest_keyring;
atomic_inc(&key->usage);
}
up_read(&cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
break;
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
goto error;
key = key_lookup(id);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error;
}
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
skey_ref = search_process_keyrings(key->type, key,
lookup_user_key_possessed,
cred);
if (!IS_ERR(skey_ref)) {
key_put(key);
key_ref = skey_ref;
}
break;
}
/* unlink does not use the nominated key in any way, so can skip all
* the permission checks as it is only concerned with the keyring */
if (lflags & KEY_LOOKUP_FOR_UNLINK) {
ret = 0;
goto error;
}
if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
goto invalid_key;
default:
if (perm)
goto invalid_key;
case 0:
break;
}
} else if (perm) {
ret = key_validate(key);
if (ret < 0)
goto invalid_key;
}
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto invalid_key;
/* check the permissions */
ret = key_task_permission(key_ref, cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
put_cred(cred);
return key_ref;
invalid_key:
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error;
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
put_cred(cred);
goto try_again;
}
| DoS | 0 | key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
struct request_key_auth *rka;
const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
if (!cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = cred->thread_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
if (!cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
key = cred->process_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
if (!cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
if (ret < 0)
goto error;
if (lflags & KEY_LOOKUP_CREATE)
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
} else if (cred->session_keyring ==
cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
goto error;
goto reget_creds;
}
rcu_read_lock();
key = rcu_dereference(cred->session_keyring);
atomic_inc(&key->usage);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
if (!cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = cred->user->uid_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
if (!cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
key = cred->user->session_keyring;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
key = cred->request_key_auth;
if (!key)
goto error;
atomic_inc(&key->usage);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
if (!cred->request_key_auth)
goto error;
down_read(&cred->request_key_auth->sem);
if (test_bit(KEY_FLAG_REVOKED,
&cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
rka = cred->request_key_auth->payload.data;
key = rka->dest_keyring;
atomic_inc(&key->usage);
}
up_read(&cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
break;
default:
key_ref = ERR_PTR(-EINVAL);
if (id < 1)
goto error;
key = key_lookup(id);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
goto error;
}
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
skey_ref = search_process_keyrings(key->type, key,
lookup_user_key_possessed,
cred);
if (!IS_ERR(skey_ref)) {
key_put(key);
key_ref = skey_ref;
}
break;
}
/* unlink does not use the nominated key in any way, so can skip all
* the permission checks as it is only concerned with the keyring */
if (lflags & KEY_LOOKUP_FOR_UNLINK) {
ret = 0;
goto error;
}
if (!(lflags & KEY_LOOKUP_PARTIAL)) {
ret = wait_for_key_construction(key, true);
switch (ret) {
case -ERESTARTSYS:
goto invalid_key;
default:
if (perm)
goto invalid_key;
case 0:
break;
}
} else if (perm) {
ret = key_validate(key);
if (ret < 0)
goto invalid_key;
}
ret = -EIO;
if (!(lflags & KEY_LOOKUP_PARTIAL) &&
!test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
goto invalid_key;
/* check the permissions */
ret = key_task_permission(key_ref, cred, perm);
if (ret < 0)
goto invalid_key;
key->last_used_at = current_kernel_time().tv_sec;
error:
put_cred(cred);
return key_ref;
invalid_key:
key_ref_put(key_ref);
key_ref = ERR_PTR(ret);
goto error;
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
put_cred(cred);
goto try_again;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,727 | key_ref_t search_my_process_keyrings(struct key_type *type,
const void *description,
key_match_func_t match,
bool no_state_check,
const struct cred *cred)
{
key_ref_t key_ref, ret, err;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a sample error (probably -EACCES) if
* none of the keyrings were searchable
*
* in terms of priority: success > -ENOKEY > -EAGAIN > other error
*/
key_ref = NULL;
ret = NULL;
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
if (cred->thread_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->thread_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the process keyring second */
if (cred->process_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->process_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the session keyring */
if (cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
make_key_ref(rcu_dereference(cred->session_keyring), 1),
cred, type, description, match, no_state_check);
rcu_read_unlock();
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* or search the user-session keyring */
else if (cred->user->session_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->user->session_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
found:
return key_ref;
}
| DoS | 0 | key_ref_t search_my_process_keyrings(struct key_type *type,
const void *description,
key_match_func_t match,
bool no_state_check,
const struct cred *cred)
{
key_ref_t key_ref, ret, err;
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a sample error (probably -EACCES) if
* none of the keyrings were searchable
*
* in terms of priority: success > -ENOKEY > -EAGAIN > other error
*/
key_ref = NULL;
ret = NULL;
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
if (cred->thread_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->thread_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the process keyring second */
if (cred->process_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->process_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* search the session keyring */
if (cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
make_key_ref(rcu_dereference(cred->session_keyring), 1),
cred, type, description, match, no_state_check);
rcu_read_unlock();
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* or search the user-session keyring */
else if (cred->user->session_keyring) {
key_ref = keyring_search_aux(
make_key_ref(cred->user->session_keyring, 1),
cred, type, description, match, no_state_check);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
if (ret)
break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
default:
err = key_ref;
break;
}
}
/* no key - decide on the error we're going to go for */
key_ref = ret ? ret : err;
found:
return key_ref;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,728 | key_ref_t search_process_keyrings(struct key_type *type,
const void *description,
key_match_func_t match,
const struct cred *cred)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
key_ref = search_my_process_keyrings(type, description, match,
false, cred);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
if (cred->request_key_auth &&
cred == current_cred() &&
type != &key_type_request_key_auth
) {
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
if (key_validate(cred->request_key_auth) == 0) {
rka = cred->request_key_auth->payload.data;
key_ref = search_process_keyrings(type, description,
match, rka->cred);
up_read(&cred->request_key_auth->sem);
if (!IS_ERR(key_ref))
goto found;
ret = key_ref;
} else {
up_read(&cred->request_key_auth->sem);
}
}
/* no key - decide on the error we're going to go for */
if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
key_ref = ERR_PTR(-ENOKEY);
else if (err == ERR_PTR(-EACCES))
key_ref = ret;
else
key_ref = err;
found:
return key_ref;
}
| DoS | 0 | key_ref_t search_process_keyrings(struct key_type *type,
const void *description,
key_match_func_t match,
const struct cred *cred)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
key_ref = search_my_process_keyrings(type, description, match,
false, cred);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
if (cred->request_key_auth &&
cred == current_cred() &&
type != &key_type_request_key_auth
) {
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
if (key_validate(cred->request_key_auth) == 0) {
rka = cred->request_key_auth->payload.data;
key_ref = search_process_keyrings(type, description,
match, rka->cred);
up_read(&cred->request_key_auth->sem);
if (!IS_ERR(key_ref))
goto found;
ret = key_ref;
} else {
up_read(&cred->request_key_auth->sem);
}
}
/* no key - decide on the error we're going to go for */
if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
key_ref = ERR_PTR(-ENOKEY);
else if (err == ERR_PTR(-EACCES))
key_ref = ret;
else
key_ref = err;
found:
return key_ref;
}
| @@ -57,7 +57,7 @@ int install_user_keyrings(void)
kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
} | CWE-362 | null | null |
26,729 | static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
{
__u8 *buffer;
int buffer_size;
int i;
int err;
__u8 cs = 0;
struct ti_i2c_desc *i2c_header;
struct ti_i2c_image_header *img_header;
struct ti_i2c_firmware_rec *firmware_rec;
const struct firmware *fw;
const char *fw_name = "edgeport/down3.bin";
/* In order to update the I2C firmware we must change the type 2 record
* to type 0xF2. This will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will download the latest
* firmware (padded to 15.5k) into the UMP ram. And finally when the
* device comes back up in download mode the driver will cause the new
* firmware to be copied from the UMP Ram to I2C and the firmware will
* update the record type from 0xf2 to 0x02.
*/
/* Allocate a 15.5k buffer + 2 bytes for version number
* (Firmware Record) */
buffer_size = (((1024 * 16) - 512 ) +
sizeof(struct ti_i2c_firmware_rec));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
memset(buffer, 0xff, buffer_size);
err = request_firmware(&fw, fw_name, dev);
if (err) {
dev_err(dev, "Failed to load image \"%s\" err %d\n",
fw_name, err);
kfree(buffer);
return err;
}
/* Save Download Version Number */
OperationalMajorVersion = fw->data[0];
OperationalMinorVersion = fw->data[1];
OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);
/* Copy version number into firmware record */
firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
/* Pointer to fw_down memory image */
img_header = (struct ti_i2c_image_header *)&fw->data[4];
memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
&fw->data[4 + sizeof(struct ti_i2c_image_header)],
le16_to_cpu(img_header->Length));
release_firmware(fw);
for (i=0; i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
kfree(buffer);
/* Build new header */
i2c_header = (struct ti_i2c_desc *)header;
firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
i2c_header->Size = (__u16)buffer_size;
i2c_header->CheckSum = cs;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
return 0;
}
| DoS | 0 | static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
{
__u8 *buffer;
int buffer_size;
int i;
int err;
__u8 cs = 0;
struct ti_i2c_desc *i2c_header;
struct ti_i2c_image_header *img_header;
struct ti_i2c_firmware_rec *firmware_rec;
const struct firmware *fw;
const char *fw_name = "edgeport/down3.bin";
/* In order to update the I2C firmware we must change the type 2 record
* to type 0xF2. This will force the UMP to come up in Boot Mode.
* Then while in boot mode, the driver will download the latest
* firmware (padded to 15.5k) into the UMP ram. And finally when the
* device comes back up in download mode the driver will cause the new
* firmware to be copied from the UMP Ram to I2C and the firmware will
* update the record type from 0xf2 to 0x02.
*/
/* Allocate a 15.5k buffer + 2 bytes for version number
* (Firmware Record) */
buffer_size = (((1024 * 16) - 512 ) +
sizeof(struct ti_i2c_firmware_rec));
buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
memset(buffer, 0xff, buffer_size);
err = request_firmware(&fw, fw_name, dev);
if (err) {
dev_err(dev, "Failed to load image \"%s\" err %d\n",
fw_name, err);
kfree(buffer);
return err;
}
/* Save Download Version Number */
OperationalMajorVersion = fw->data[0];
OperationalMinorVersion = fw->data[1];
OperationalBuildNumber = fw->data[2] | (fw->data[3] << 8);
/* Copy version number into firmware record */
firmware_rec = (struct ti_i2c_firmware_rec *)buffer;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
/* Pointer to fw_down memory image */
img_header = (struct ti_i2c_image_header *)&fw->data[4];
memcpy(buffer + sizeof(struct ti_i2c_firmware_rec),
&fw->data[4 + sizeof(struct ti_i2c_image_header)],
le16_to_cpu(img_header->Length));
release_firmware(fw);
for (i=0; i < buffer_size; i++) {
cs = (__u8)(cs + buffer[i]);
}
kfree(buffer);
/* Build new header */
i2c_header = (struct ti_i2c_desc *)header;
firmware_rec = (struct ti_i2c_firmware_rec*)i2c_header->Data;
i2c_header->Type = I2C_DESC_TYPE_FIRMWARE_BLANK;
i2c_header->Size = (__u16)buffer_size;
i2c_header->CheckSum = cs;
firmware_rec->Ver_Major = OperationalMajorVersion;
firmware_rec->Ver_Minor = OperationalMinorVersion;
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,730 | static int bulk_xfer(struct usb_serial *serial, void *buffer,
int length, int *num_sent)
{
int status;
status = usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
serial->port[0]->bulk_out_endpointAddress),
buffer, length, num_sent, 1000);
return status;
}
| DoS | 0 | static int bulk_xfer(struct usb_serial *serial, void *buffer,
int length, int *num_sent)
{
int status;
status = usb_bulk_msg(serial->dev,
usb_sndbulkpipe(serial->dev,
serial->port[0]->bulk_out_endpointAddress),
buffer, length, num_sent, 1000);
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,731 | static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port, struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct ump_uart_config *config;
int baud;
unsigned cflag;
int status;
int port_number = edge_port->port->number -
edge_port->port->serial->minor;
dev_dbg(dev, "%s - port %d\n", __func__, edge_port->port->number);
config = kmalloc (sizeof (*config), GFP_KERNEL);
if (!config) {
tty->termios = *old_termios;
dev_err(dev, "%s - out of memory\n", __func__);
return;
}
cflag = tty->termios.c_cflag;
config->wFlags = 0;
/* These flags must be set */
config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT;
config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR;
config->bUartMode = (__u8)(edge_port->bUartMode);
switch (cflag & CSIZE) {
case CS5:
config->bDataBits = UMP_UART_CHAR5BITS;
dev_dbg(dev, "%s - data bits = 5\n", __func__);
break;
case CS6:
config->bDataBits = UMP_UART_CHAR6BITS;
dev_dbg(dev, "%s - data bits = 6\n", __func__);
break;
case CS7:
config->bDataBits = UMP_UART_CHAR7BITS;
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
default:
case CS8:
config->bDataBits = UMP_UART_CHAR8BITS;
dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
if (cflag & PARENB) {
if (cflag & PARODD) {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_ODDPARITY;
dev_dbg(dev, "%s - parity = odd\n", __func__);
} else {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_EVENPARITY;
dev_dbg(dev, "%s - parity = even\n", __func__);
}
} else {
config->bParity = UMP_UART_NOPARITY;
dev_dbg(dev, "%s - parity = none\n", __func__);
}
if (cflag & CSTOPB) {
config->bStopBits = UMP_UART_STOPBIT2;
dev_dbg(dev, "%s - stop bits = 2\n", __func__);
} else {
config->bStopBits = UMP_UART_STOPBIT1;
dev_dbg(dev, "%s - stop bits = 1\n", __func__);
}
/* figure out the flow control settings */
if (cflag & CRTSCTS) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW;
config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW;
dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__);
} else {
dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__);
tty->hw_stopped = 0;
restart_read(edge_port);
}
/* if we are implementing XON/XOFF, set the start and stop
character in the device */
config->cXon = START_CHAR(tty);
config->cXoff = STOP_CHAR(tty);
/* if we are implementing INBOUND XON/XOFF */
if (I_IXOFF(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_IN_X;
dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__);
/* if we are implementing OUTBOUND XON/XOFF */
if (I_IXON(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X;
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__);
tty->termios.c_cflag &= ~CMSPAR;
/* Round the baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
baud = 9600;
} else
tty_encode_baud_rate(tty, baud, baud);
edge_port->baud_rate = baud;
config->wBaudRate = (__u16)((461550L + baud/2) / baud);
/* FIXME: Recompute actual baud from divisor here */
dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud, config->wBaudRate);
dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate));
dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags);
dev_dbg(dev, "bDataBits: %d\n", config->bDataBits);
dev_dbg(dev, "bParity: %d\n", config->bParity);
dev_dbg(dev, "bStopBits: %d\n", config->bStopBits);
dev_dbg(dev, "cXon: %d\n", config->cXon);
dev_dbg(dev, "cXoff: %d\n", config->cXoff);
dev_dbg(dev, "bUartMode: %d\n", config->bUartMode);
/* move the word values into big endian mode */
cpu_to_be16s(&config->wFlags);
cpu_to_be16s(&config->wBaudRate);
status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG,
(__u8)(UMPM_UART1_PORT + port_number),
0, (__u8 *)config, sizeof(*config));
if (status)
dev_dbg(dev, "%s - error %d when trying to write config to device\n",
__func__, status);
kfree(config);
}
| DoS | 0 | static void change_port_settings(struct tty_struct *tty,
struct edgeport_port *edge_port, struct ktermios *old_termios)
{
struct device *dev = &edge_port->port->dev;
struct ump_uart_config *config;
int baud;
unsigned cflag;
int status;
int port_number = edge_port->port->number -
edge_port->port->serial->minor;
dev_dbg(dev, "%s - port %d\n", __func__, edge_port->port->number);
config = kmalloc (sizeof (*config), GFP_KERNEL);
if (!config) {
tty->termios = *old_termios;
dev_err(dev, "%s - out of memory\n", __func__);
return;
}
cflag = tty->termios.c_cflag;
config->wFlags = 0;
/* These flags must be set */
config->wFlags |= UMP_MASK_UART_FLAGS_RECEIVE_MS_INT;
config->wFlags |= UMP_MASK_UART_FLAGS_AUTO_START_ON_ERR;
config->bUartMode = (__u8)(edge_port->bUartMode);
switch (cflag & CSIZE) {
case CS5:
config->bDataBits = UMP_UART_CHAR5BITS;
dev_dbg(dev, "%s - data bits = 5\n", __func__);
break;
case CS6:
config->bDataBits = UMP_UART_CHAR6BITS;
dev_dbg(dev, "%s - data bits = 6\n", __func__);
break;
case CS7:
config->bDataBits = UMP_UART_CHAR7BITS;
dev_dbg(dev, "%s - data bits = 7\n", __func__);
break;
default:
case CS8:
config->bDataBits = UMP_UART_CHAR8BITS;
dev_dbg(dev, "%s - data bits = 8\n", __func__);
break;
}
if (cflag & PARENB) {
if (cflag & PARODD) {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_ODDPARITY;
dev_dbg(dev, "%s - parity = odd\n", __func__);
} else {
config->wFlags |= UMP_MASK_UART_FLAGS_PARITY;
config->bParity = UMP_UART_EVENPARITY;
dev_dbg(dev, "%s - parity = even\n", __func__);
}
} else {
config->bParity = UMP_UART_NOPARITY;
dev_dbg(dev, "%s - parity = none\n", __func__);
}
if (cflag & CSTOPB) {
config->bStopBits = UMP_UART_STOPBIT2;
dev_dbg(dev, "%s - stop bits = 2\n", __func__);
} else {
config->bStopBits = UMP_UART_STOPBIT1;
dev_dbg(dev, "%s - stop bits = 1\n", __func__);
}
/* figure out the flow control settings */
if (cflag & CRTSCTS) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X_CTS_FLOW;
config->wFlags |= UMP_MASK_UART_FLAGS_RTS_FLOW;
dev_dbg(dev, "%s - RTS/CTS is enabled\n", __func__);
} else {
dev_dbg(dev, "%s - RTS/CTS is disabled\n", __func__);
tty->hw_stopped = 0;
restart_read(edge_port);
}
/* if we are implementing XON/XOFF, set the start and stop
character in the device */
config->cXon = START_CHAR(tty);
config->cXoff = STOP_CHAR(tty);
/* if we are implementing INBOUND XON/XOFF */
if (I_IXOFF(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_IN_X;
dev_dbg(dev, "%s - INBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - INBOUND XON/XOFF is disabled\n", __func__);
/* if we are implementing OUTBOUND XON/XOFF */
if (I_IXON(tty)) {
config->wFlags |= UMP_MASK_UART_FLAGS_OUT_X;
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is enabled, XON = %2x, XOFF = %2x\n",
__func__, config->cXon, config->cXoff);
} else
dev_dbg(dev, "%s - OUTBOUND XON/XOFF is disabled\n", __func__);
tty->termios.c_cflag &= ~CMSPAR;
/* Round the baud rate */
baud = tty_get_baud_rate(tty);
if (!baud) {
/* pick a default, any default... */
baud = 9600;
} else
tty_encode_baud_rate(tty, baud, baud);
edge_port->baud_rate = baud;
config->wBaudRate = (__u16)((461550L + baud/2) / baud);
/* FIXME: Recompute actual baud from divisor here */
dev_dbg(dev, "%s - baud rate = %d, wBaudRate = %d\n", __func__, baud, config->wBaudRate);
dev_dbg(dev, "wBaudRate: %d\n", (int)(461550L / config->wBaudRate));
dev_dbg(dev, "wFlags: 0x%x\n", config->wFlags);
dev_dbg(dev, "bDataBits: %d\n", config->bDataBits);
dev_dbg(dev, "bParity: %d\n", config->bParity);
dev_dbg(dev, "bStopBits: %d\n", config->bStopBits);
dev_dbg(dev, "cXon: %d\n", config->cXon);
dev_dbg(dev, "cXoff: %d\n", config->cXoff);
dev_dbg(dev, "bUartMode: %d\n", config->bUartMode);
/* move the word values into big endian mode */
cpu_to_be16s(&config->wFlags);
cpu_to_be16s(&config->wBaudRate);
status = send_cmd(edge_port->port->serial->dev, UMPC_SET_CONFIG,
(__u8)(UMPM_UART1_PORT + port_number),
0, (__u8 *)config, sizeof(*config));
if (status)
dev_dbg(dev, "%s - error %d when trying to write config to device\n",
__func__, status);
kfree(config);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,732 | static int check_i2c_image(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
struct ti_i2c_desc *rom_desc;
int start_address = 2;
__u8 *buffer;
__u16 ttype;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory when allocating buffer\n",
__func__);
kfree(rom_desc);
return -ENOMEM;
}
/* Read the first byte (Signature0) must be 0x52 or 0x10 */
status = read_rom(serial, 0, 1, buffer);
if (status)
goto out;
if (*buffer != UMP5152 && *buffer != UMP3410) {
dev_err(dev, "%s - invalid buffer signature\n", __func__);
status = -ENODEV;
goto out;
}
do {
/* Validate the I2C */
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
break;
}
dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type);
/* Skip type 2 record */
ttype = rom_desc->Type & 0x0f;
if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
break;
status = valid_csum(rom_desc, buffer);
if (status)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size;
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
(start_address > TI_MAX_I2C_SIZE))
status = -ENODEV;
out:
kfree(buffer);
kfree(rom_desc);
return status;
}
| DoS | 0 | static int check_i2c_image(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
struct ti_i2c_desc *rom_desc;
int start_address = 2;
__u8 *buffer;
__u16 ttype;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
buffer = kmalloc(TI_MAX_I2C_SIZE, GFP_KERNEL);
if (!buffer) {
dev_err(dev, "%s - out of memory when allocating buffer\n",
__func__);
kfree(rom_desc);
return -ENOMEM;
}
/* Read the first byte (Signature0) must be 0x52 or 0x10 */
status = read_rom(serial, 0, 1, buffer);
if (status)
goto out;
if (*buffer != UMP5152 && *buffer != UMP3410) {
dev_err(dev, "%s - invalid buffer signature\n", __func__);
status = -ENODEV;
goto out;
}
do {
/* Validate the I2C */
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
break;
if ((start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size) > TI_MAX_I2C_SIZE) {
status = -ENODEV;
dev_dbg(dev, "%s - structure too big, erroring out.\n", __func__);
break;
}
dev_dbg(dev, "%s Type = 0x%x\n", __func__, rom_desc->Type);
/* Skip type 2 record */
ttype = rom_desc->Type & 0x0f;
if (ttype != I2C_DESC_TYPE_FIRMWARE_BASIC
&& ttype != I2C_DESC_TYPE_FIRMWARE_AUTO) {
/* Read the descriptor data */
status = read_rom(serial, start_address +
sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
break;
status = valid_csum(rom_desc, buffer);
if (status)
break;
}
start_address = start_address + sizeof(struct ti_i2c_desc) +
rom_desc->Size;
} while ((rom_desc->Type != I2C_DESC_TYPE_ION) &&
(start_address < TI_MAX_I2C_SIZE));
if ((rom_desc->Type != I2C_DESC_TYPE_ION) ||
(start_address > TI_MAX_I2C_SIZE))
status = -ENODEV;
out:
kfree(buffer);
kfree(rom_desc);
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,733 | static int config_boot_dev(struct usb_device *dev)
{
return 0;
}
| DoS | 0 | static int config_boot_dev(struct usb_device *dev)
{
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,734 | static int download_code(struct edgeport_serial *serial, __u8 *image,
int image_length)
{
int status = 0;
int pos;
int transfer;
int done;
/* Transfer firmware image */
for (pos = 0; pos < image_length; ) {
/* Read the next buffer from file */
transfer = image_length - pos;
if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
transfer = EDGE_FW_BULK_MAX_PACKET_SIZE;
/* Transfer data */
status = bulk_xfer(serial->serial, &image[pos],
transfer, &done);
if (status)
break;
/* Advance buffer pointer */
pos += done;
}
return status;
}
| DoS | 0 | static int download_code(struct edgeport_serial *serial, __u8 *image,
int image_length)
{
int status = 0;
int pos;
int transfer;
int done;
/* Transfer firmware image */
for (pos = 0; pos < image_length; ) {
/* Read the next buffer from file */
transfer = image_length - pos;
if (transfer > EDGE_FW_BULK_MAX_PACKET_SIZE)
transfer = EDGE_FW_BULK_MAX_PACKET_SIZE;
/* Transfer data */
status = bulk_xfer(serial->serial, &image[pos],
transfer, &done);
if (status)
break;
/* Advance buffer pointer */
pos += done;
}
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,735 | static void edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
int bv = 0; /* Off */
/* chase the port close */
chase_port(edge_port, 0, 0);
if (break_state == -1)
bv = 1; /* On */
status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
if (status)
dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n",
__func__, status);
}
| DoS | 0 | static void edge_break(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
int bv = 0; /* Off */
/* chase the port close */
chase_port(edge_port, 0, 0);
if (break_state == -1)
bv = 1; /* On */
status = ti_do_config(edge_port, UMPC_SET_CLR_BREAK, bv);
if (status)
dev_dbg(&port->dev, "%s - error %d sending break set/clear command.\n",
__func__, status);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,736 | static void edge_bulk_out_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status = urb->status;
struct tty_struct *tty;
edge_port->ep_write_urb_in_use = 0;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err_console(port, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
}
/* send any buffered data */
tty = tty_port_tty_get(&port->port);
edge_send(tty);
tty_kref_put(tty);
}
| DoS | 0 | static void edge_bulk_out_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status = urb->status;
struct tty_struct *tty;
edge_port->ep_write_urb_in_use = 0;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err_console(port, "%s - nonzero write bulk status "
"received: %d\n", __func__, status);
}
/* send any buffered data */
tty = tty_port_tty_get(&port->port);
edge_send(tty);
tty_kref_put(tty);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,737 | static int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
| DoS | 0 | static int edge_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int chars = 0;
unsigned long flags;
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
chars = kfifo_len(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
return chars;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,738 | static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
struct usb_serial *serial = port->serial;
int port_number;
edge_serial = usb_get_serial_data(port->serial);
edge_port = usb_get_serial_port_data(port);
if (edge_serial == NULL || edge_port == NULL)
return;
/* The bulk read completion routine will check
* this flag and dump any read data */
edge_port->close_pending = 1;
/* chase the port close and flush */
chase_port(edge_port, (HZ * closing_wait) / 100, 1);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
/* assuming we can still talk to the device,
* send a close port command to it */
dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
port_number = port->number - port->serial->minor;
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected) {
send_cmd(serial->dev,
UMPC_CLOSE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
0,
NULL,
0);
}
mutex_unlock(&serial->disc_mutex);
mutex_lock(&edge_serial->es_lock);
--edge_port->edge_serial->num_ports_open;
if (edge_port->edge_serial->num_ports_open <= 0) {
/* last port is now closed, let's shut down our interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
edge_port->edge_serial->num_ports_open = 0;
}
mutex_unlock(&edge_serial->es_lock);
edge_port->close_pending = 0;
}
| DoS | 0 | static void edge_close(struct usb_serial_port *port)
{
struct edgeport_serial *edge_serial;
struct edgeport_port *edge_port;
struct usb_serial *serial = port->serial;
int port_number;
edge_serial = usb_get_serial_data(port->serial);
edge_port = usb_get_serial_port_data(port);
if (edge_serial == NULL || edge_port == NULL)
return;
/* The bulk read completion routine will check
* this flag and dump any read data */
edge_port->close_pending = 1;
/* chase the port close and flush */
chase_port(edge_port, (HZ * closing_wait) / 100, 1);
usb_kill_urb(port->read_urb);
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
/* assuming we can still talk to the device,
* send a close port command to it */
dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
port_number = port->number - port->serial->minor;
mutex_lock(&serial->disc_mutex);
if (!serial->disconnected) {
send_cmd(serial->dev,
UMPC_CLOSE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
0,
NULL,
0);
}
mutex_unlock(&serial->disc_mutex);
mutex_lock(&edge_serial->es_lock);
--edge_port->edge_serial->num_ports_open;
if (edge_port->edge_serial->num_ports_open <= 0) {
/* last port is now closed, let's shut down our interrupt urb */
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
edge_port->edge_serial->num_ports_open = 0;
}
mutex_unlock(&edge_serial->es_lock);
edge_port->close_pending = 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,739 | static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_uart_mode);
}
| DoS | 0 | static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
return device_create_file(&port->dev, &dev_attr_uart_mode);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,740 | static void edge_disconnect(struct usb_serial *serial)
{
}
| DoS | 0 | static void edge_disconnect(struct usb_serial *serial)
{
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,741 | static int edge_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct async_icount *ic = &edge_port->icount;
icount->cts = ic->cts;
icount->dsr = ic->dsr;
icount->rng = ic->rng;
icount->dcd = ic->dcd;
icount->tx = ic->tx;
icount->rx = ic->rx;
icount->frame = ic->frame;
icount->parity = ic->parity;
icount->overrun = ic->overrun;
icount->brk = ic->brk;
icount->buf_overrun = ic->buf_overrun;
return 0;
}
| DoS | 0 | static int edge_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct async_icount *ic = &edge_port->icount;
icount->cts = ic->cts;
icount->dsr = ic->dsr;
icount->rng = ic->rng;
icount->dcd = ic->dcd;
icount->tx = ic->tx;
icount->rx = ic->rx;
icount->frame = ic->frame;
icount->parity = ic->parity;
icount->overrun = ic->overrun;
icount->brk = ic->brk;
icount->buf_overrun = ic->buf_overrun;
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,742 | static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
__u8 lsr;
__u8 msr;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
usb_serial_debug_data(dev, __func__, length, data);
if (length != 2) {
dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dev_dbg(dev, "%s - edge_port not found\n", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/* Save the LSR event for bulk read
completion routine */
dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
| DoS | 0 | static void edge_interrupt_callback(struct urb *urb)
{
struct edgeport_serial *edge_serial = urb->context;
struct usb_serial_port *port;
struct edgeport_port *edge_port;
struct device *dev;
unsigned char *data = urb->transfer_buffer;
int length = urb->actual_length;
int port_number;
int function;
int retval;
__u8 lsr;
__u8 msr;
int status = urb->status;
switch (status) {
case 0:
/* success */
break;
case -ECONNRESET:
case -ENOENT:
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(&urb->dev->dev, "%s - urb shutting down with status: %d\n",
__func__, status);
return;
default:
dev_err(&urb->dev->dev, "%s - nonzero urb status received: "
"%d\n", __func__, status);
goto exit;
}
if (!length) {
dev_dbg(&urb->dev->dev, "%s - no data in urb\n", __func__);
goto exit;
}
dev = &edge_serial->serial->dev->dev;
usb_serial_debug_data(dev, __func__, length, data);
if (length != 2) {
dev_dbg(dev, "%s - expecting packet of size 2, got %d\n", __func__, length);
goto exit;
}
port_number = TIUMP_GET_PORT_FROM_CODE(data[0]);
function = TIUMP_GET_FUNC_FROM_CODE(data[0]);
dev_dbg(dev, "%s - port_number %d, function %d, info 0x%x\n", __func__,
port_number, function, data[1]);
port = edge_serial->serial->port[port_number];
edge_port = usb_get_serial_port_data(port);
if (!edge_port) {
dev_dbg(dev, "%s - edge_port not found\n", __func__);
return;
}
switch (function) {
case TIUMP_INTERRUPT_CODE_LSR:
lsr = map_line_status(data[1]);
if (lsr & UMP_UART_LSR_DATA_MASK) {
/* Save the LSR event for bulk read
completion routine */
dev_dbg(dev, "%s - LSR Event Port %u LSR Status = %02x\n",
__func__, port_number, lsr);
edge_port->lsr_event = 1;
edge_port->lsr_mask = lsr;
} else {
dev_dbg(dev, "%s - ===== Port %d LSR Status = %02x ======\n",
__func__, port_number, lsr);
handle_new_lsr(edge_port, 0, lsr, 0);
}
break;
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
dev_dbg(dev, "%s - ===== Port %u MSR Status = %02x ======\n",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
default:
dev_err(&urb->dev->dev,
"%s - Unknown Interrupt code from UMP %x\n",
__func__, data[1]);
break;
}
exit:
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
"%s - usb_submit_urb failed with result %d\n",
__func__, retval);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,743 | static int edge_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct async_icount cnow;
struct async_icount cprev;
dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd);
switch (cmd) {
case TIOCGSERIAL:
dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
return get_serial_info(edge_port,
(struct serial_struct __user *) arg);
case TIOCMIWAIT:
dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
cprev = edge_port->icount;
while (1) {
interruptible_sleep_on(&edge_port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
cnow = edge_port->icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
return -EIO; /* no change => error */
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
return 0;
}
cprev = cnow;
}
/* not reached */
break;
}
return -ENOIOCTLCMD;
}
| DoS | 0 | static int edge_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct async_icount cnow;
struct async_icount cprev;
dev_dbg(&port->dev, "%s - port %d, cmd = 0x%x\n", __func__, port->number, cmd);
switch (cmd) {
case TIOCGSERIAL:
dev_dbg(&port->dev, "%s - TIOCGSERIAL\n", __func__);
return get_serial_info(edge_port,
(struct serial_struct __user *) arg);
case TIOCMIWAIT:
dev_dbg(&port->dev, "%s - TIOCMIWAIT\n", __func__);
cprev = edge_port->icount;
while (1) {
interruptible_sleep_on(&edge_port->delta_msr_wait);
/* see if a signal did it */
if (signal_pending(current))
return -ERESTARTSYS;
cnow = edge_port->icount;
if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
return -EIO; /* no change => error */
if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) ||
((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
return 0;
}
cprev = cnow;
}
/* not reached */
break;
}
return -ENOIOCTLCMD;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,744 | static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial;
struct usb_device *dev;
struct urb *urb;
int port_number;
int status;
u16 open_settings;
u8 transaction_timeout;
if (edge_port == NULL)
return -ENODEV;
port_number = port->number - port->serial->minor;
switch (port_number) {
case 0:
edge_port->uart_base = UMPMEM_BASE_UART1;
edge_port->dma_address = UMPD_OEDB1_ADDRESS;
break;
case 1:
edge_port->uart_base = UMPMEM_BASE_UART2;
edge_port->dma_address = UMPD_OEDB2_ADDRESS;
break;
default:
dev_err(&port->dev, "Unknown port number!!!\n");
return -ENODEV;
}
dev_dbg(&port->dev, "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
__func__, port_number, edge_port->uart_base, edge_port->dma_address);
dev = port->serial->dev;
memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
init_waitqueue_head(&edge_port->delta_msr_wait);
/* turn off loopback */
status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear loopback command, %d\n",
__func__, status);
return status;
}
/* set up the port settings */
if (tty)
edge_set_termios(tty, port, &tty->termios);
/* open up the port */
/* milliseconds to timeout for DMA transfer */
transaction_timeout = 2;
edge_port->ump_read_timeout =
max(20, ((transaction_timeout * 3) / 2));
/* milliseconds to timeout for DMA transfer */
open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
UMP_PIPE_TRANS_TIMEOUT_ENA |
(transaction_timeout << 2));
dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);
/* Tell TI to open and start the port */
status = send_cmd(dev, UMPC_OPEN_PORT,
(u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command, %d\n",
__func__, status);
return status;
}
/* Start the DMA? */
status = send_cmd(dev, UMPC_START_PORT,
(u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
__func__, status);
return status;
}
/* Clear TX and RX buffers in UMP */
status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear buffers command, %d\n",
__func__, status);
return status;
}
/* Read Initial MSR */
status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
(__u16)(UMPM_UART1_PORT + port_number),
&edge_port->shadow_msr, 1);
if (status) {
dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
__func__, status);
return status;
}
dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);
/* Set Initial MCR */
edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);
edge_serial = edge_port->edge_serial;
if (mutex_lock_interruptible(&edge_serial->es_lock))
return -ERESTARTSYS;
if (edge_serial->num_ports_open == 0) {
/* we are the first port to open, post the interrupt urb */
urb = edge_serial->serial->port[0]->interrupt_in_urb;
if (!urb) {
dev_err(&port->dev,
"%s - no interrupt urb present, exiting\n",
__func__);
status = -EINVAL;
goto release_es_lock;
}
urb->context = edge_serial;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with value %d\n",
__func__, status);
goto release_es_lock;
}
	}
	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync sometimes
	 */
usb_clear_halt(dev, port->write_urb->pipe);
usb_clear_halt(dev, port->read_urb->pipe);
/* start up our bulk read urb */
urb = port->read_urb;
if (!urb) {
dev_err(&port->dev, "%s - no read urb present, exiting\n",
__func__);
status = -EINVAL;
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
urb->context = edge_port;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed with value %d\n",
__func__, status);
goto unlink_int_urb;
}
++edge_serial->num_ports_open;
goto release_es_lock;
unlink_int_urb:
if (edge_port->edge_serial->num_ports_open == 0)
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
mutex_unlock(&edge_serial->es_lock);
return status;
}
| DoS | 0 | static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
struct edgeport_serial *edge_serial;
struct usb_device *dev;
struct urb *urb;
int port_number;
int status;
u16 open_settings;
u8 transaction_timeout;
if (edge_port == NULL)
return -ENODEV;
port_number = port->number - port->serial->minor;
switch (port_number) {
case 0:
edge_port->uart_base = UMPMEM_BASE_UART1;
edge_port->dma_address = UMPD_OEDB1_ADDRESS;
break;
case 1:
edge_port->uart_base = UMPMEM_BASE_UART2;
edge_port->dma_address = UMPD_OEDB2_ADDRESS;
break;
default:
dev_err(&port->dev, "Unknown port number!!!\n");
return -ENODEV;
}
dev_dbg(&port->dev, "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
__func__, port_number, edge_port->uart_base, edge_port->dma_address);
dev = port->serial->dev;
memset(&(edge_port->icount), 0x00, sizeof(edge_port->icount));
init_waitqueue_head(&edge_port->delta_msr_wait);
/* turn off loopback */
status = ti_do_config(edge_port, UMPC_SET_CLR_LOOPBACK, 0);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear loopback command, %d\n",
__func__, status);
return status;
}
/* set up the port settings */
if (tty)
edge_set_termios(tty, port, &tty->termios);
/* open up the port */
/* milliseconds to timeout for DMA transfer */
transaction_timeout = 2;
edge_port->ump_read_timeout =
max(20, ((transaction_timeout * 3) / 2));
/* milliseconds to timeout for DMA transfer */
open_settings = (u8)(UMP_DMA_MODE_CONTINOUS |
UMP_PIPE_TRANS_TIMEOUT_ENA |
(transaction_timeout << 2));
dev_dbg(&port->dev, "%s - Sending UMPC_OPEN_PORT\n", __func__);
/* Tell TI to open and start the port */
status = send_cmd(dev, UMPC_OPEN_PORT,
(u8)(UMPM_UART1_PORT + port_number), open_settings, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send open command, %d\n",
__func__, status);
return status;
}
/* Start the DMA? */
status = send_cmd(dev, UMPC_START_PORT,
(u8)(UMPM_UART1_PORT + port_number), 0, NULL, 0);
if (status) {
dev_err(&port->dev, "%s - cannot send start DMA command, %d\n",
__func__, status);
return status;
}
/* Clear TX and RX buffers in UMP */
status = purge_port(port, UMP_PORT_DIR_OUT | UMP_PORT_DIR_IN);
if (status) {
dev_err(&port->dev,
"%s - cannot send clear buffers command, %d\n",
__func__, status);
return status;
}
/* Read Initial MSR */
status = ti_vread_sync(dev, UMPC_READ_MSR, 0,
(__u16)(UMPM_UART1_PORT + port_number),
&edge_port->shadow_msr, 1);
if (status) {
dev_err(&port->dev, "%s - cannot send read MSR command, %d\n",
__func__, status);
return status;
}
dev_dbg(&port->dev, "ShadowMSR 0x%X\n", edge_port->shadow_msr);
/* Set Initial MCR */
edge_port->shadow_mcr = MCR_RTS | MCR_DTR;
dev_dbg(&port->dev, "ShadowMCR 0x%X\n", edge_port->shadow_mcr);
edge_serial = edge_port->edge_serial;
if (mutex_lock_interruptible(&edge_serial->es_lock))
return -ERESTARTSYS;
if (edge_serial->num_ports_open == 0) {
/* we are the first port to open, post the interrupt urb */
urb = edge_serial->serial->port[0]->interrupt_in_urb;
if (!urb) {
dev_err(&port->dev,
"%s - no interrupt urb present, exiting\n",
__func__);
status = -EINVAL;
goto release_es_lock;
}
urb->context = edge_serial;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - usb_submit_urb failed with value %d\n",
__func__, status);
goto release_es_lock;
}
	}
	/*
	 * reset the data toggle on the bulk endpoints to work around bug in
	 * host controllers where things get out of sync sometimes
	 */
usb_clear_halt(dev, port->write_urb->pipe);
usb_clear_halt(dev, port->read_urb->pipe);
/* start up our bulk read urb */
urb = port->read_urb;
if (!urb) {
dev_err(&port->dev, "%s - no read urb present, exiting\n",
__func__);
status = -EINVAL;
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
urb->context = edge_port;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed with value %d\n",
__func__, status);
goto unlink_int_urb;
}
++edge_serial->num_ports_open;
goto release_es_lock;
unlink_int_urb:
if (edge_port->edge_serial->num_ports_open == 0)
usb_kill_urb(port->serial->port[0]->interrupt_in_urb);
release_es_lock:
mutex_unlock(&edge_serial->es_lock);
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,745 | static int edge_port_probe(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
int ret;
edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
if (!edge_port)
return -ENOMEM;
ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
GFP_KERNEL);
if (ret) {
kfree(edge_port);
return -ENOMEM;
}
spin_lock_init(&edge_port->ep_lock);
edge_port->port = port;
edge_port->edge_serial = usb_get_serial_data(port->serial);
edge_port->bUartMode = default_uart_mode;
usb_set_serial_port_data(port, edge_port);
ret = edge_create_sysfs_attrs(port);
if (ret) {
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
return ret;
}
return 0;
}
| DoS | 0 | static int edge_port_probe(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
int ret;
edge_port = kzalloc(sizeof(*edge_port), GFP_KERNEL);
if (!edge_port)
return -ENOMEM;
ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
GFP_KERNEL);
if (ret) {
kfree(edge_port);
return -ENOMEM;
}
spin_lock_init(&edge_port->ep_lock);
edge_port->port = port;
edge_port->edge_serial = usb_get_serial_data(port->serial);
edge_port->bUartMode = default_uart_mode;
usb_set_serial_port_data(port, edge_port);
ret = edge_create_sysfs_attrs(port);
if (ret) {
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
return ret;
}
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,746 | static int edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
edge_remove_sysfs_attrs(port);
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
return 0;
}
| DoS | 0 | static int edge_port_remove(struct usb_serial_port *port)
{
struct edgeport_port *edge_port;
edge_port = usb_get_serial_port_data(port);
edge_remove_sysfs_attrs(port);
kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,747 | static void edge_release(struct usb_serial *serial)
{
kfree(usb_get_serial_data(serial));
}
| DoS | 0 | static void edge_release(struct usb_serial *serial)
{
kfree(usb_get_serial_data(serial));
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,748 | static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_uart_mode);
return 0;
}
| DoS | 0 | static int edge_remove_sysfs_attrs(struct usb_serial_port *port)
{
device_remove_file(&port->dev, &dev_attr_uart_mode);
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,749 | static void edge_send(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int count, result;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_write_urb_in_use) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
count = kfifo_out(&edge_port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
if (count == 0) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
edge_port->ep_write_urb_in_use = 1;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
usb_serial_debug_data(&port->dev, __func__, count, port->write_urb->transfer_buffer);
/* set up our urb */
port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
edge_port->ep_write_urb_in_use = 0;
/* TODO: reschedule edge_send */
} else
edge_port->icount.tx += count;
/* wakeup any process waiting for writes to complete */
/* there is now more room in the buffer for new writes */
if (tty)
tty_wakeup(tty);
}
| DoS | 0 | static void edge_send(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
int count, result;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_write_urb_in_use) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
count = kfifo_out(&edge_port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
if (count == 0) {
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return;
}
edge_port->ep_write_urb_in_use = 1;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
usb_serial_debug_data(&port->dev, __func__, count, port->write_urb->transfer_buffer);
/* set up our urb */
port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err_console(port,
"%s - failed submitting write urb, error %d\n",
__func__, result);
edge_port->ep_write_urb_in_use = 0;
/* TODO: reschedule edge_send */
} else
edge_port->icount.tx += count;
/* wakeup any process waiting for writes to complete */
/* there is now more room in the buffer for new writes */
if (tty)
tty_wakeup(tty);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,750 | static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int cflag;
cflag = tty->termios.c_cflag;
dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
tty->termios.c_cflag, tty->termios.c_iflag);
dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
old_termios->c_cflag, old_termios->c_iflag);
dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
if (edge_port == NULL)
return;
/* change the port settings to the new ones specified */
change_port_settings(tty, edge_port, old_termios);
}
| DoS | 0 | static void edge_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int cflag;
cflag = tty->termios.c_cflag;
dev_dbg(&port->dev, "%s - clfag %08x iflag %08x\n", __func__,
tty->termios.c_cflag, tty->termios.c_iflag);
dev_dbg(&port->dev, "%s - old clfag %08x old iflag %08x\n", __func__,
old_termios->c_cflag, old_termios->c_iflag);
dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number);
if (edge_port == NULL)
return;
/* change the port settings to the new ones specified */
change_port_settings(tty, edge_port, old_termios);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,751 | static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = edge_write(tty, port, &stop_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status);
}
}
/* if we are implementing RTS/CTS, stop reads */
/* and the Edgeport will clear the RTS line */
if (C_CRTSCTS(tty))
stop_read(edge_port);
}
| DoS | 0 | static void edge_throttle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the stop character */
if (I_IXOFF(tty)) {
unsigned char stop_char = STOP_CHAR(tty);
status = edge_write(tty, port, &stop_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write stop character, %d\n", __func__, status);
}
}
/* if we are implementing RTS/CTS, stop reads */
/* and the Edgeport will clear the RTS line */
if (C_CRTSCTS(tty))
stop_read(edge_port);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,752 | static int edge_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int msr;
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
msr = edge_port->shadow_msr;
mcr = edge_port->shadow_mcr;
result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */
| ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */
| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */
| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */
| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */
| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */
dev_dbg(&port->dev, "%s -- %x\n", __func__, result);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return result;
}
| DoS | 0 | static int edge_tiocmget(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int result = 0;
unsigned int msr;
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
msr = edge_port->shadow_msr;
mcr = edge_port->shadow_mcr;
result = ((mcr & MCR_DTR) ? TIOCM_DTR: 0) /* 0x002 */
| ((mcr & MCR_RTS) ? TIOCM_RTS: 0) /* 0x004 */
| ((msr & EDGEPORT_MSR_CTS) ? TIOCM_CTS: 0) /* 0x020 */
| ((msr & EDGEPORT_MSR_CD) ? TIOCM_CAR: 0) /* 0x040 */
| ((msr & EDGEPORT_MSR_RI) ? TIOCM_RI: 0) /* 0x080 */
| ((msr & EDGEPORT_MSR_DSR) ? TIOCM_DSR: 0); /* 0x100 */
dev_dbg(&port->dev, "%s -- %x\n", __func__, result);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
return result;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,753 | static int edge_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
mcr = edge_port->shadow_mcr;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
edge_port->shadow_mcr = mcr;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
restore_mcr(edge_port, mcr);
return 0;
}
| DoS | 0 | static int edge_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int mcr;
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
mcr = edge_port->shadow_mcr;
if (set & TIOCM_RTS)
mcr |= MCR_RTS;
if (set & TIOCM_DTR)
mcr |= MCR_DTR;
if (set & TIOCM_LOOP)
mcr |= MCR_LOOPBACK;
if (clear & TIOCM_RTS)
mcr &= ~MCR_RTS;
if (clear & TIOCM_DTR)
mcr &= ~MCR_DTR;
if (clear & TIOCM_LOOP)
mcr &= ~MCR_LOOPBACK;
edge_port->shadow_mcr = mcr;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
restore_mcr(edge_port, mcr);
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,754 | static void edge_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = edge_write(tty, port, &start_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status);
}
}
	/* if we are implementing RTS/CTS, restart reads */
	/* and the Edgeport will assert the RTS line */
if (C_CRTSCTS(tty)) {
status = restart_read(edge_port);
if (status)
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed: %d\n",
__func__, status);
}
}
| DoS | 0 | static void edge_unthrottle(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int status;
if (edge_port == NULL)
return;
/* if we are implementing XON/XOFF, send the start character */
if (I_IXOFF(tty)) {
unsigned char start_char = START_CHAR(tty);
status = edge_write(tty, port, &start_char, 1);
if (status <= 0) {
dev_err(&port->dev, "%s - failed to write start character, %d\n", __func__, status);
}
}
	/* if we are implementing RTS/CTS, restart reads */
	/* and the Edgeport will assert the RTS line */
if (C_CRTSCTS(tty)) {
status = restart_read(edge_port);
if (status)
dev_err(&port->dev,
"%s - read bulk usb_submit_urb failed: %d\n",
__func__, status);
}
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,755 | static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (edge_port == NULL)
return -ENODEV;
if (edge_port->close_pending == 1)
return -ENODEV;
count = kfifo_in_locked(&edge_port->write_fifo, data, count,
&edge_port->ep_lock);
edge_send(tty);
return count;
}
| DoS | 0 | static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *data, int count)
{
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
if (count == 0) {
dev_dbg(&port->dev, "%s - write request of 0 bytes\n", __func__);
return 0;
}
if (edge_port == NULL)
return -ENODEV;
if (edge_port->close_pending == 1)
return -ENODEV;
count = kfifo_in_locked(&edge_port->write_fifo, data, count,
&edge_port->ep_lock);
edge_send(tty);
return count;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,756 | static int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int room = 0;
unsigned long flags;
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = kfifo_avail(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
| DoS | 0 | static int edge_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
int room = 0;
unsigned long flags;
if (edge_port == NULL)
return 0;
if (edge_port->close_pending == 1)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
room = kfifo_avail(&edge_port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
return room;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,757 | static int get_descriptor_addr(struct edgeport_serial *serial,
int desc_type, struct ti_i2c_desc *rom_desc)
{
int start_address;
int status;
/* Search for requested descriptor in I2C */
start_address = 2;
do {
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
return 0;
if (rom_desc->Type == desc_type)
return start_address;
start_address = start_address + sizeof(struct ti_i2c_desc)
+ rom_desc->Size;
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
return 0;
}
| DoS | 0 | static int get_descriptor_addr(struct edgeport_serial *serial,
int desc_type, struct ti_i2c_desc *rom_desc)
{
int start_address;
int status;
/* Search for requested descriptor in I2C */
start_address = 2;
do {
status = read_rom(serial,
start_address,
sizeof(struct ti_i2c_desc),
(__u8 *)rom_desc);
if (status)
return 0;
if (rom_desc->Type == desc_type)
return start_address;
start_address = start_address + sizeof(struct ti_i2c_desc)
+ rom_desc->Size;
} while ((start_address < TI_MAX_I2C_SIZE) && rom_desc->Type);
return 0;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,758 | static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
{
int status;
int start_address;
struct ti_i2c_desc *rom_desc;
struct edge_ti_manuf_descriptor *desc;
struct device *dev = &serial->serial->dev->dev;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc);
if (!start_address) {
dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n", __func__);
status = -ENODEV;
goto exit;
}
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
goto exit;
status = valid_csum(rom_desc, buffer);
desc = (struct edge_ti_manuf_descriptor *)buffer;
dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig);
dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version);
dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev);
dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts);
dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts);
dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts);
exit:
kfree(rom_desc);
return status;
}
| DoS | 0 | static int get_manuf_info(struct edgeport_serial *serial, __u8 *buffer)
{
int status;
int start_address;
struct ti_i2c_desc *rom_desc;
struct edge_ti_manuf_descriptor *desc;
struct device *dev = &serial->serial->dev->dev;
rom_desc = kmalloc(sizeof(*rom_desc), GFP_KERNEL);
if (!rom_desc) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
start_address = get_descriptor_addr(serial, I2C_DESC_TYPE_ION,
rom_desc);
if (!start_address) {
dev_dbg(dev, "%s - Edge Descriptor not found in I2C\n", __func__);
status = -ENODEV;
goto exit;
}
/* Read the descriptor data */
status = read_rom(serial, start_address+sizeof(struct ti_i2c_desc),
rom_desc->Size, buffer);
if (status)
goto exit;
status = valid_csum(rom_desc, buffer);
desc = (struct edge_ti_manuf_descriptor *)buffer;
dev_dbg(dev, "%s - IonConfig 0x%x\n", __func__, desc->IonConfig);
dev_dbg(dev, "%s - Version %d\n", __func__, desc->Version);
dev_dbg(dev, "%s - Cpu/Board 0x%x\n", __func__, desc->CpuRev_BoardRev);
dev_dbg(dev, "%s - NumPorts %d\n", __func__, desc->NumPorts);
dev_dbg(dev, "%s - NumVirtualPorts %d\n", __func__, desc->NumVirtualPorts);
dev_dbg(dev, "%s - TotalPorts %d\n", __func__, desc->TotalPorts);
exit:
kfree(rom_desc);
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,759 | static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
__u8 lsr, __u8 data)
{
struct async_icount *icount;
__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
struct tty_struct *tty;
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);
edge_port->shadow_lsr = lsr;
if (new_lsr & LSR_BREAK)
/*
* Parity and Framing errors only count if they
* occur exclusive of a break being received.
*/
new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
if (lsr_data) {
tty = tty_port_tty_get(&edge_port->port->port);
if (tty) {
edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
tty_kref_put(tty);
}
}
/* update input line counters */
icount = &edge_port->icount;
if (new_lsr & LSR_BREAK)
icount->brk++;
if (new_lsr & LSR_OVER_ERR)
icount->overrun++;
if (new_lsr & LSR_PAR_ERR)
icount->parity++;
if (new_lsr & LSR_FRM_ERR)
icount->frame++;
}
| DoS | 0 | static void handle_new_lsr(struct edgeport_port *edge_port, int lsr_data,
__u8 lsr, __u8 data)
{
struct async_icount *icount;
__u8 new_lsr = (__u8)(lsr & (__u8)(LSR_OVER_ERR | LSR_PAR_ERR |
LSR_FRM_ERR | LSR_BREAK));
struct tty_struct *tty;
dev_dbg(&edge_port->port->dev, "%s - %02x\n", __func__, new_lsr);
edge_port->shadow_lsr = lsr;
if (new_lsr & LSR_BREAK)
/*
* Parity and Framing errors only count if they
* occur exclusive of a break being received.
*/
new_lsr &= (__u8)(LSR_OVER_ERR | LSR_BREAK);
/* Place LSR data byte into Rx buffer */
if (lsr_data) {
tty = tty_port_tty_get(&edge_port->port->port);
if (tty) {
edge_tty_recv(&edge_port->port->dev, tty, &data, 1);
tty_kref_put(tty);
}
}
/* update input line counters */
icount = &edge_port->icount;
if (new_lsr & LSR_BREAK)
icount->brk++;
if (new_lsr & LSR_OVER_ERR)
icount->overrun++;
if (new_lsr & LSR_PAR_ERR)
icount->parity++;
if (new_lsr & LSR_FRM_ERR)
icount->frame++;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,760 | static int i2c_type_bootmode(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status;
u8 *data;
data = kmalloc(1, GFP_KERNEL);
if (!data) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
goto out;
}
dev_dbg(dev, "%s - Unknown\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = -ENODEV;
out:
kfree(data);
return status;
}
| DoS | 0 | static int i2c_type_bootmode(struct edgeport_serial *serial)
{
struct device *dev = &serial->serial->dev->dev;
int status;
u8 *data;
data = kmalloc(1, GFP_KERNEL);
if (!data) {
dev_err(dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 2 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_II\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dev_dbg(dev, "%s - read 3 status error = %d\n", __func__, status);
else
dev_dbg(dev, "%s - read 2 data = 0x%x\n", __func__, *data);
if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dev_dbg(dev, "%s - ROM_TYPE_III\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
goto out;
}
dev_dbg(dev, "%s - Unknown\n", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
status = -ENODEV;
out:
kfree(data);
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,761 | static __u8 map_line_status(__u8 ti_lsr)
{
__u8 lsr = 0;
#define MAP_FLAG(flagUmp, flagUart) \
if (ti_lsr & flagUmp) \
lsr |= flagUart;
MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */
MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */
MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */
MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */
MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */
MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */
#undef MAP_FLAG
return lsr;
}
| DoS | 0 | static __u8 map_line_status(__u8 ti_lsr)
{
__u8 lsr = 0;
#define MAP_FLAG(flagUmp, flagUart) \
if (ti_lsr & flagUmp) \
lsr |= flagUart;
MAP_FLAG(UMP_UART_LSR_OV_MASK, LSR_OVER_ERR) /* overrun */
MAP_FLAG(UMP_UART_LSR_PE_MASK, LSR_PAR_ERR) /* parity error */
MAP_FLAG(UMP_UART_LSR_FE_MASK, LSR_FRM_ERR) /* framing error */
MAP_FLAG(UMP_UART_LSR_BR_MASK, LSR_BREAK) /* break detected */
MAP_FLAG(UMP_UART_LSR_RX_MASK, LSR_RX_AVAIL) /* rx data available */
MAP_FLAG(UMP_UART_LSR_TX_MASK, LSR_TX_EMPTY) /* tx hold reg empty */
#undef MAP_FLAG
return lsr;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,762 | static int purge_port(struct usb_serial_port *port, __u16 mask)
{
int port_number = port->number - port->serial->minor;
dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);
return send_cmd(port->serial->dev,
UMPC_PURGE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
mask,
NULL,
0);
}
| DoS | 0 | static int purge_port(struct usb_serial_port *port, __u16 mask)
{
int port_number = port->number - port->serial->minor;
dev_dbg(&port->dev, "%s - port %d, mask %x\n", __func__, port_number, mask);
return send_cmd(port->serial->dev,
UMPC_PURGE_PORT,
(__u8)(UMPM_UART1_PORT + port_number),
mask,
NULL,
0);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,763 | static int read_boot_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status = 0;
int i;
for (i = 0; i < length; i++) {
status = ti_vread_sync(serial->serial->dev,
UMPC_MEMORY_READ, serial->TI_I2C_Type,
(__u16)(start_address+i), &buffer[i], 0x01);
if (status) {
dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
}
dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
__func__, start_address, length);
usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);
serial->TiReadI2C = 1;
return status;
}
| DoS | 0 | static int read_boot_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status = 0;
int i;
for (i = 0; i < length; i++) {
status = ti_vread_sync(serial->serial->dev,
UMPC_MEMORY_READ, serial->TI_I2C_Type,
(__u16)(start_address+i), &buffer[i], 0x01);
if (status) {
dev_dbg(&serial->serial->dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
}
dev_dbg(&serial->serial->dev->dev, "%s - start_address = %x, length = %d\n",
__func__, start_address, length);
usb_serial_debug_data(&serial->serial->dev->dev, __func__, length, buffer);
serial->TiReadI2C = 1;
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,764 | static int read_download_mem(struct usb_device *dev, int start_address,
int length, __u8 address_type, __u8 *buffer)
{
int status = 0;
__u8 read_length;
__be16 be_start_address;
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
/* Read in blocks of 64 bytes
* (TI firmware can't handle more than 64 byte reads)
*/
while (length) {
if (length > 64)
read_length = 64;
else
read_length = (__u8)length;
if (read_length > 1) {
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
}
be_start_address = cpu_to_be16(start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, read_length);
if (status) {
dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
if (read_length > 1)
usb_serial_debug_data(&dev->dev, __func__, read_length, buffer);
/* Update pointers/length */
start_address += read_length;
buffer += read_length;
length -= read_length;
}
return status;
}
| DoS | 0 | static int read_download_mem(struct usb_device *dev, int start_address,
int length, __u8 address_type, __u8 *buffer)
{
int status = 0;
__u8 read_length;
__be16 be_start_address;
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, length);
/* Read in blocks of 64 bytes
* (TI firmware can't handle more than 64 byte reads)
*/
while (length) {
if (length > 64)
read_length = 64;
else
read_length = (__u8)length;
if (read_length > 1) {
dev_dbg(&dev->dev, "%s - @ %x for %d\n", __func__, start_address, read_length);
}
be_start_address = cpu_to_be16(start_address);
status = ti_vread_sync(dev, UMPC_MEMORY_READ,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, read_length);
if (status) {
dev_dbg(&dev->dev, "%s - ERROR %x\n", __func__, status);
return status;
}
if (read_length > 1)
usb_serial_debug_data(&dev->dev, __func__, read_length, buffer);
/* Update pointers/length */
start_address += read_length;
buffer += read_length;
length -= read_length;
}
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,765 | static int read_ram(struct usb_device *dev, int start_address,
int length, __u8 *buffer)
{
return read_download_mem(dev, start_address, length,
DTK_ADDR_SPACE_XDATA, buffer);
}
| DoS | 0 | static int read_ram(struct usb_device *dev, int start_address,
int length, __u8 *buffer)
{
return read_download_mem(dev, start_address, length,
DTK_ADDR_SPACE_XDATA, buffer);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,766 | static int read_rom(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status;
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
status = read_download_mem(serial->serial->dev,
start_address,
length,
serial->TI_I2C_Type,
buffer);
} else {
status = read_boot_mem(serial, start_address, length,
buffer);
}
return status;
}
| DoS | 0 | static int read_rom(struct edgeport_serial *serial,
int start_address, int length, __u8 *buffer)
{
int status;
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD) {
status = read_download_mem(serial->serial->dev,
start_address,
length,
serial->TI_I2C_Type,
buffer);
} else {
status = read_boot_mem(serial, start_address, length,
buffer);
}
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,767 | static int restore_mcr(struct edgeport_port *port, __u8 mcr)
{
int status = 0;
dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr);
status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
if (status)
return status;
status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
if (status)
return status;
return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}
| DoS | 0 | static int restore_mcr(struct edgeport_port *port, __u8 mcr)
{
int status = 0;
dev_dbg(&port->port->dev, "%s - %x\n", __func__, mcr);
status = ti_do_config(port, UMPC_SET_CLR_DTR, mcr & MCR_DTR);
if (status)
return status;
status = ti_do_config(port, UMPC_SET_CLR_RTS, mcr & MCR_RTS);
if (status)
return status;
return ti_do_config(port, UMPC_SET_CLR_LOOPBACK, mcr & MCR_LOOPBACK);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,768 | static int send_cmd(struct usb_device *dev, __u8 command,
__u8 moduleid, __u16 value, u8 *data,
int size)
{
return ti_vsend_sync(dev, command, value, moduleid, data, size);
}
| DoS | 0 | static int send_cmd(struct usb_device *dev, __u8 command,
__u8 moduleid, __u16 value, u8 *data,
int size)
{
return ti_vsend_sync(dev, command, value, moduleid, data, size);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,769 | static void stop_read(struct edgeport_port *edge_port)
{
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
edge_port->shadow_mcr &= ~MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}
| DoS | 0 | static void stop_read(struct edgeport_port *edge_port)
{
unsigned long flags;
spin_lock_irqsave(&edge_port->ep_lock, flags);
if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPING;
edge_port->shadow_mcr &= ~MCR_RTS;
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,770 | static ssize_t store_uart_mode(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int v = simple_strtoul(valbuf, NULL, 0);
dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v);
if (v < 256)
edge_port->bUartMode = v;
else
dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);
return count;
}
| DoS | 0 | static ssize_t store_uart_mode(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct edgeport_port *edge_port = usb_get_serial_port_data(port);
unsigned int v = simple_strtoul(valbuf, NULL, 0);
dev_dbg(dev, "%s: setting uart_mode = %d\n", __func__, v);
if (v < 256)
edge_port->bUartMode = v;
else
dev_err(dev, "%s - uart_mode %d is invalid\n", __func__, v);
return count;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,771 | static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}
| DoS | 0 | static int ti_cpu_rev(struct edge_ti_manuf_descriptor *desc)
{
return TI_GET_CPU_REVISION(desc->CpuRev_BoardRev);
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,772 | static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
__u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb) {
dev_err(&port->port->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte,
as not all platforms can do DMA
from stack */
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
| DoS | 0 | static int tx_active(struct edgeport_port *port)
{
int status;
struct out_endpoint_desc_block *oedb;
__u8 *lsr;
int bytes_left = 0;
oedb = kmalloc(sizeof(*oedb), GFP_KERNEL);
if (!oedb) {
dev_err(&port->port->dev, "%s - out of memory\n", __func__);
return -ENOMEM;
}
lsr = kmalloc(1, GFP_KERNEL); /* Sigh, that's right, just one byte,
as not all platforms can do DMA
from stack */
if (!lsr) {
kfree(oedb);
return -ENOMEM;
}
/* Read the DMA Count Registers */
status = read_ram(port->port->serial->dev, port->dma_address,
sizeof(*oedb), (void *)oedb);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - XByteCount 0x%X\n", __func__, oedb->XByteCount);
/* and the LSR */
status = read_ram(port->port->serial->dev,
port->uart_base + UMPMEM_OFFS_UART_LSR, 1, lsr);
if (status)
goto exit_is_tx_active;
dev_dbg(&port->port->dev, "%s - LSR = 0x%X\n", __func__, *lsr);
/* If either buffer has data or we are transmitting then return TRUE */
if ((oedb->XByteCount & 0x80) != 0)
bytes_left += 64;
if ((*lsr & UMP_UART_LSR_TX_MASK) == 0)
bytes_left += 1;
/* We return Not Active if we get any kind of error */
exit_is_tx_active:
dev_dbg(&port->port->dev, "%s - return %d\n", __func__, bytes_left);
kfree(lsr);
kfree(oedb);
return bytes_left;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,773 | static int write_i2c_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 address_type, __u8 *buffer)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int write_length;
__be16 be_start_address;
/* We can only send a maximum of 1 aligned byte page at a time */
/* calculate the number of bytes left in the first page */
write_length = EPROM_PAGE_SIZE -
(start_address & (EPROM_PAGE_SIZE - 1));
if (write_length > length)
write_length = length;
dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/* Write first page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
/* We should be aligned now -- can write
max page size bytes at a time */
while (length) {
if (length > EPROM_PAGE_SIZE)
write_length = EPROM_PAGE_SIZE;
else
write_length = length;
dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/* Write next page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
}
return status;
}
| DoS | 0 | static int write_i2c_mem(struct edgeport_serial *serial,
int start_address, int length, __u8 address_type, __u8 *buffer)
{
struct device *dev = &serial->serial->dev->dev;
int status = 0;
int write_length;
__be16 be_start_address;
/* We can only send a maximum of 1 aligned byte page at a time */
/* calculate the number of bytes left in the first page */
write_length = EPROM_PAGE_SIZE -
(start_address & (EPROM_PAGE_SIZE - 1));
if (write_length > length)
write_length = length;
dev_dbg(dev, "%s - BytesInFirstPage Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/* Write first page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev,
UMPC_MEMORY_WRITE, (__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dev_dbg(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
/* We should be aligned now -- can write
max page size bytes at a time */
while (length) {
if (length > EPROM_PAGE_SIZE)
write_length = EPROM_PAGE_SIZE;
else
write_length = length;
dev_dbg(dev, "%s - Page Write Addr = %x, length = %d\n",
__func__, start_address, write_length);
usb_serial_debug_data(dev, __func__, write_length, buffer);
/* Write next page */
be_start_address = cpu_to_be16(start_address);
status = ti_vsend_sync(serial->serial->dev, UMPC_MEMORY_WRITE,
(__u16)address_type,
(__force __u16)be_start_address,
buffer, write_length);
if (status) {
dev_err(dev, "%s - ERROR %d\n", __func__, status);
return status;
}
length -= write_length;
start_address += write_length;
buffer += write_length;
}
return status;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
26,774 | static int write_rom(struct edgeport_serial *serial, int start_address,
int length, __u8 *buffer)
{
if (serial->product_info.TiMode == TI_MODE_BOOT)
return write_boot_mem(serial, start_address, length,
buffer);
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
return write_i2c_mem(serial, start_address, length,
serial->TI_I2C_Type, buffer);
return -EINVAL;
}
| DoS | 0 | static int write_rom(struct edgeport_serial *serial, int start_address,
int length, __u8 *buffer)
{
if (serial->product_info.TiMode == TI_MODE_BOOT)
return write_boot_mem(serial, start_address, length,
buffer);
if (serial->product_info.TiMode == TI_MODE_DOWNLOAD)
return write_i2c_mem(serial, start_address, length,
serial->TI_I2C_Type, buffer);
return -EINVAL;
}
| @@ -530,6 +530,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout,
wait_queue_t wait;
unsigned long flags;
+ if (!tty)
+ return;
+
if (!timeout)
timeout = (HZ * EDGE_CLOSING_WAIT)/100;
| CWE-264 | null | null |
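Note: the CWE-264 rows above (26,772–26,774) all carry the same patch, which adds a NULL-pointer guard at the top of chase_port() in the io_ti driver. A minimal standalone sketch of that early-return pattern follows; the structures and field names are simplified stand-ins for illustration, not the actual driver types.

```c
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver structures. */
struct tty_struct { int dummy; };
struct edgeport_port { struct tty_struct *tty; };

/* Sketch of the guarded pattern: bail out before touching tty-dependent state. */
static void chase_port_sketch(struct edgeport_port *port, unsigned long timeout)
{
	struct tty_struct *tty = port->tty;

	if (!tty)		/* the hardening added by the patch: no tty, nothing to chase */
		return;

	if (!timeout)
		timeout = 100;	/* placeholder for (HZ * EDGE_CLOSING_WAIT)/100 */

	/* ... wait for the transmit buffers to drain, as the real driver does ... */
	(void)timeout;
}

int main(void)
{
	struct edgeport_port port = { .tty = NULL };

	chase_port_sketch(&port, 0);	/* returns immediately instead of dereferencing NULL */
	printf("guard exercised\n");
	return 0;
}
```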
26,775 | hv_kvp_init(struct hv_util_service *srv)
{
int err;
err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback);
if (err)
return err;
recv_buffer = srv->recv_buffer;
return 0;
}
| DoS Overflow +Priv | 0 | hv_kvp_init(struct hv_util_service *srv)
{
int err;
err = cn_add_callback(&kvp_id, kvp_name, kvp_cn_callback);
if (err)
return err;
recv_buffer = srv->recv_buffer;
return 0;
}
| @@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */ | CWE-119 | null | null |
26,776 | kvp_register(void)
{
struct cn_msg *msg;
msg = kzalloc(sizeof(*msg) + strlen(HV_DRV_VERSION) + 1 , GFP_ATOMIC);
if (msg) {
msg->id.idx = CN_KVP_IDX;
msg->id.val = CN_KVP_VAL;
msg->seq = KVP_REGISTER;
strcpy(msg->data, HV_DRV_VERSION);
msg->len = strlen(HV_DRV_VERSION) + 1;
cn_netlink_send(msg, 0, GFP_ATOMIC);
kfree(msg);
}
}
| DoS Overflow +Priv | 0 | kvp_register(void)
{
struct cn_msg *msg;
msg = kzalloc(sizeof(*msg) + strlen(HV_DRV_VERSION) + 1 , GFP_ATOMIC);
if (msg) {
msg->id.idx = CN_KVP_IDX;
msg->id.val = CN_KVP_VAL;
msg->seq = KVP_REGISTER;
strcpy(msg->data, HV_DRV_VERSION);
msg->len = strlen(HV_DRV_VERSION) + 1;
cn_netlink_send(msg, 0, GFP_ATOMIC);
kfree(msg);
}
}
| @@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */ | CWE-119 | null | null |
26,777 | kvp_send_key(struct work_struct *dummy)
{
struct cn_msg *msg;
int index = kvp_transaction.index;
msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
if (msg) {
msg->id.idx = CN_KVP_IDX;
msg->id.val = CN_KVP_VAL;
msg->seq = KVP_KERNEL_GET;
((struct hv_ku_msg *)msg->data)->kvp_index = index;
msg->len = sizeof(struct hv_ku_msg);
cn_netlink_send(msg, 0, GFP_ATOMIC);
kfree(msg);
}
return;
}
| DoS Overflow +Priv | 0 | kvp_send_key(struct work_struct *dummy)
{
struct cn_msg *msg;
int index = kvp_transaction.index;
msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
if (msg) {
msg->id.idx = CN_KVP_IDX;
msg->id.val = CN_KVP_VAL;
msg->seq = KVP_KERNEL_GET;
((struct hv_ku_msg *)msg->data)->kvp_index = index;
msg->len = sizeof(struct hv_ku_msg);
cn_netlink_send(msg, 0, GFP_ATOMIC);
kfree(msg);
}
return;
}
| @@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */ | CWE-119 | null | null |
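Note: the CWE-119 patch repeated in rows 26,775–26,777 bounds the utf8s_to_utf16s() calls in kvp_respond_to_host() so the UTF-16 output can never overrun the fixed-size key/value exchange buffers. The sketch below illustrates the same idea in plain userspace C with a hypothetical bounded converter; it is not the kernel helper, only a simplified model of passing the destination capacity alongside the buffer.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical bounded converter modelled on the patched call pattern:
 * the caller passes the number of UTF-16 code units the destination can
 * hold, and the function never writes past it.  Only the ASCII subset of
 * UTF-8 is handled here to keep the sketch short.
 */
static int utf8_to_utf16_bounded(const char *src, size_t srclen,
				 uint16_t *dst, size_t maxout)
{
	size_t in, out = 0;

	for (in = 0; in < srclen; in++) {
		if ((unsigned char)src[in] >= 0x80)
			return -1;	/* multi-byte input not handled in this sketch */
		if (out >= maxout)
			break;		/* capacity reached: stop instead of overflowing */
		dst[out++] = (uint16_t)src[in];
	}
	return (int)out;		/* number of UTF-16 code units written */
}

int main(void)
{
	uint16_t value[8];		/* stands in for a fixed-size exchange buffer */
	const char *key = "a deliberately long ascii key";
	int n = utf8_to_utf16_bounded(key, strlen(key),
				      value, sizeof(value) / sizeof(value[0]));

	printf("wrote %d of %zu code units\n", n, strlen(key));
	return 0;
}
```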
26,778 | static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
{
while (len && name[len - 1] == '.')
len--;
return len;
}
| DoS Overflow +Priv | 0 | static unsigned int __vfat_striptail_len(unsigned int len, const char *name)
{
while (len && name[len - 1] == '.')
len--;
return len;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,779 | static void __exit exit_vfat_fs(void)
{
unregister_filesystem(&vfat_fs_type);
}
| DoS Overflow +Priv | 0 | static void __exit exit_vfat_fs(void)
{
unregister_filesystem(&vfat_fs_type);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,780 | static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
sb->s_d_op = &vfat_ci_dentry_ops;
else
sb->s_d_op = &vfat_dentry_ops;
}
| DoS Overflow +Priv | 0 | static void setup(struct super_block *sb)
{
MSDOS_SB(sb)->dir_ops = &vfat_dir_inode_operations;
if (MSDOS_SB(sb)->options.name_check != 's')
sb->s_d_op = &vfat_ci_dentry_ops;
else
sb->s_d_op = &vfat_dentry_ops;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,781 | static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
int cluster, struct timespec *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
unsigned int len;
int err, nr_slots;
len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS);
if (slots == NULL)
return -ENOMEM;
err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
slots, &nr_slots);
if (err)
goto cleanup;
err = fat_add_entries(dir, slots, nr_slots, sinfo);
if (err)
goto cleanup;
/* update timestamp */
dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
cleanup:
kfree(slots);
return err;
}
| DoS Overflow +Priv | 0 | static int vfat_add_entry(struct inode *dir, struct qstr *qname, int is_dir,
int cluster, struct timespec *ts,
struct fat_slot_info *sinfo)
{
struct msdos_dir_slot *slots;
unsigned int len;
int err, nr_slots;
len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
slots = kmalloc(sizeof(*slots) * MSDOS_SLOTS, GFP_NOFS);
if (slots == NULL)
return -ENOMEM;
err = vfat_build_slots(dir, qname->name, len, is_dir, cluster, ts,
slots, &nr_slots);
if (err)
goto cleanup;
err = fat_add_entries(dir, slots, nr_slots, sinfo);
if (err)
goto cleanup;
/* update timestamp */
dir->i_ctime = dir->i_mtime = dir->i_atime = *ts;
if (IS_DIRSYNC(dir))
(void)fat_sync_inode(dir);
else
mark_inode_dirty(dir);
cleanup:
kfree(slots);
return err;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,782 | static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec *ts,
struct msdos_dir_slot *slots, int *nr_slots)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct fat_mount_options *opts = &sbi->options;
struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
unsigned char cksum, lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
__le16 time, date;
u8 time_cs;
int err, ulen, usize, i;
loff_t offset;
*nr_slots = 0;
uname = __getname();
if (!uname)
return -ENOMEM;
err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
opts->unicode_xlate, opts->utf8, sbi->nls_io);
if (err)
goto out_free;
err = vfat_is_used_badchars(uname, ulen);
if (err)
goto out_free;
err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
msdos_name, &lcase);
if (err < 0)
goto out_free;
else if (err == 1) {
de = (struct msdos_dir_entry *)slots;
err = 0;
goto shortname;
}
/* build the entry of long file name */
cksum = fat_checksum(msdos_name);
*nr_slots = usize / 13;
for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
ps->id = i;
ps->attr = ATTR_EXT;
ps->reserved = 0;
ps->alias_checksum = cksum;
ps->start = 0;
offset = (i - 1) * 13;
fatwchar_to16(ps->name0_4, uname + offset, 5);
fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
}
slots[0].id |= 0x40;
de = (struct msdos_dir_entry *)ps;
shortname:
/* build the entry of 8.3 alias name */
(*nr_slots)++;
memcpy(de->name, msdos_name, MSDOS_NAME);
de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
de->lcase = lcase;
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de->time = de->ctime = time;
de->date = de->cdate = de->adate = date;
de->ctime_cs = time_cs;
de->start = cpu_to_le16(cluster);
de->starthi = cpu_to_le16(cluster >> 16);
de->size = 0;
out_free:
__putname(uname);
return err;
}
| DoS Overflow +Priv | 0 | static int vfat_build_slots(struct inode *dir, const unsigned char *name,
int len, int is_dir, int cluster,
struct timespec *ts,
struct msdos_dir_slot *slots, int *nr_slots)
{
struct msdos_sb_info *sbi = MSDOS_SB(dir->i_sb);
struct fat_mount_options *opts = &sbi->options;
struct msdos_dir_slot *ps;
struct msdos_dir_entry *de;
unsigned char cksum, lcase;
unsigned char msdos_name[MSDOS_NAME];
wchar_t *uname;
__le16 time, date;
u8 time_cs;
int err, ulen, usize, i;
loff_t offset;
*nr_slots = 0;
uname = __getname();
if (!uname)
return -ENOMEM;
err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
opts->unicode_xlate, opts->utf8, sbi->nls_io);
if (err)
goto out_free;
err = vfat_is_used_badchars(uname, ulen);
if (err)
goto out_free;
err = vfat_create_shortname(dir, sbi->nls_disk, uname, ulen,
msdos_name, &lcase);
if (err < 0)
goto out_free;
else if (err == 1) {
de = (struct msdos_dir_entry *)slots;
err = 0;
goto shortname;
}
/* build the entry of long file name */
cksum = fat_checksum(msdos_name);
*nr_slots = usize / 13;
for (ps = slots, i = *nr_slots; i > 0; i--, ps++) {
ps->id = i;
ps->attr = ATTR_EXT;
ps->reserved = 0;
ps->alias_checksum = cksum;
ps->start = 0;
offset = (i - 1) * 13;
fatwchar_to16(ps->name0_4, uname + offset, 5);
fatwchar_to16(ps->name5_10, uname + offset + 5, 6);
fatwchar_to16(ps->name11_12, uname + offset + 11, 2);
}
slots[0].id |= 0x40;
de = (struct msdos_dir_entry *)ps;
shortname:
/* build the entry of 8.3 alias name */
(*nr_slots)++;
memcpy(de->name, msdos_name, MSDOS_NAME);
de->attr = is_dir ? ATTR_DIR : ATTR_ARCH;
de->lcase = lcase;
fat_time_unix2fat(sbi, ts, &time, &date, &time_cs);
de->time = de->ctime = time;
de->date = de->cdate = de->adate = date;
de->ctime_cs = time_cs;
de->start = cpu_to_le16(cluster);
de->starthi = cpu_to_le16(cluster >> 16);
de->size = 0;
out_free:
__putname(uname);
return err;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,783 | static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (nls_strnicmp(t, name->name, str, alen) == 0)
return 0;
}
return 1;
}
| DoS Overflow +Priv | 0 | static int vfat_cmpi(const struct dentry *parent, const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name)
{
struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
unsigned int alen, blen;
/* A filename cannot end in '.' or we treat it like it has none */
alen = vfat_striptail_len(name);
blen = __vfat_striptail_len(len, str);
if (alen == blen) {
if (nls_strnicmp(t, name->name, str, alen) == 0)
return 0;
}
return 1;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,784 | static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
wchar_t *uname, int ulen,
unsigned char *name_res, unsigned char *lcase)
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
INIT_SHORTNAME_INFO(&ext_info);
/* Now, we need to create a shortname from the long name */
ext_start = end = &uname[ulen];
while (--ext_start >= uname) {
if (*ext_start == 0x002E) { /* is `.' */
if (ext_start == end - 1) {
sz = ulen;
ext_start = NULL;
}
break;
}
}
if (ext_start == uname - 1) {
sz = ulen;
ext_start = NULL;
} else if (ext_start) {
/*
* Names which start with a dot could be just
* an extension eg. "...test". In this case Win95
* uses the extension as the name and sets no extension.
*/
name_start = &uname[0];
while (name_start < ext_start) {
if (!vfat_skip_char(*name_start))
break;
name_start++;
}
if (name_start != ext_start) {
sz = ext_start - uname;
ext_start++;
} else {
sz = ulen;
ext_start = NULL;
}
}
numtail_baselen = 6;
numtail2_baselen = 2;
for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &base_info);
if (chl == 0)
continue;
if (baselen < 2 && (baselen + chl) > 2)
numtail2_baselen = baselen;
if (baselen < 6 && (baselen + chl) > 6)
numtail_baselen = baselen;
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
baselen++;
if (baselen >= 8)
break;
}
if (baselen >= 8) {
if ((chi < chl - 1) || (ip + 1) - uname < sz)
is_shortname = 0;
break;
}
}
if (baselen == 0) {
return -EINVAL;
}
extlen = 0;
if (ext_start) {
for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &ext_info);
if (chl == 0)
continue;
if ((extlen + chl) > 3) {
is_shortname = 0;
break;
}
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
extlen++;
}
if (extlen >= 3) {
if (ip + 1 != end)
is_shortname = 0;
break;
}
}
}
ext[extlen] = '\0';
base[baselen] = '\0';
/* Yes, it can happen. ".\xe5" would do it. */
if (base[0] == DELETED_FLAG)
base[0] = 0x05;
/* OK, at this point we know that base is not longer than 8 symbols,
* ext is not longer than 3, base is nonempty, both don't contain
* any bad symbols (lowercase transformed to uppercase).
*/
memset(name_res, ' ', MSDOS_NAME);
memcpy(name_res, base, baselen);
memcpy(name_res + 8, ext, extlen);
*lcase = 0;
if (is_shortname && base_info.valid && ext_info.valid) {
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
*lcase |= CASE_LOWER_BASE;
if (!ext_info.upper && ext_info.lower)
*lcase |= CASE_LOWER_EXT;
return 1;
}
return 0;
} else {
BUG();
}
}
if (opts->numtail == 0)
if (vfat_find_form(dir, name_res) < 0)
return 0;
/*
* Try to find a unique extension. This used to
* iterate through all possibilities sequentially,
* but that gave extremely bad performance. Windows
* only tries a few cases before using random
* values for part of the base.
*/
if (baselen > 6) {
baselen = numtail_baselen;
name_res[7] = ' ';
}
name_res[baselen] = '~';
for (i = 1; i < 10; i++) {
name_res[baselen + 1] = i + '0';
if (vfat_find_form(dir, name_res) < 0)
return 0;
}
i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
name_res[7] = ' ';
}
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
i -= 11;
}
return 0;
}
| DoS Overflow +Priv | 0 | static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
wchar_t *uname, int ulen,
unsigned char *name_res, unsigned char *lcase)
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
int is_shortname;
struct shortname_info base_info, ext_info;
is_shortname = 1;
INIT_SHORTNAME_INFO(&base_info);
INIT_SHORTNAME_INFO(&ext_info);
/* Now, we need to create a shortname from the long name */
ext_start = end = &uname[ulen];
while (--ext_start >= uname) {
if (*ext_start == 0x002E) { /* is `.' */
if (ext_start == end - 1) {
sz = ulen;
ext_start = NULL;
}
break;
}
}
if (ext_start == uname - 1) {
sz = ulen;
ext_start = NULL;
} else if (ext_start) {
/*
* Names which start with a dot could be just
* an extension eg. "...test". In this case Win95
* uses the extension as the name and sets no extension.
*/
name_start = &uname[0];
while (name_start < ext_start) {
if (!vfat_skip_char(*name_start))
break;
name_start++;
}
if (name_start != ext_start) {
sz = ext_start - uname;
ext_start++;
} else {
sz = ulen;
ext_start = NULL;
}
}
numtail_baselen = 6;
numtail2_baselen = 2;
for (baselen = i = 0, p = base, ip = uname; i < sz; i++, ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &base_info);
if (chl == 0)
continue;
if (baselen < 2 && (baselen + chl) > 2)
numtail2_baselen = baselen;
if (baselen < 6 && (baselen + chl) > 6)
numtail_baselen = baselen;
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
baselen++;
if (baselen >= 8)
break;
}
if (baselen >= 8) {
if ((chi < chl - 1) || (ip + 1) - uname < sz)
is_shortname = 0;
break;
}
}
if (baselen == 0) {
return -EINVAL;
}
extlen = 0;
if (ext_start) {
for (p = ext, ip = ext_start; extlen < 3 && ip < end; ip++) {
chl = to_shortname_char(nls, charbuf, sizeof(charbuf),
ip, &ext_info);
if (chl == 0)
continue;
if ((extlen + chl) > 3) {
is_shortname = 0;
break;
}
for (chi = 0; chi < chl; chi++) {
*p++ = charbuf[chi];
extlen++;
}
if (extlen >= 3) {
if (ip + 1 != end)
is_shortname = 0;
break;
}
}
}
ext[extlen] = '\0';
base[baselen] = '\0';
/* Yes, it can happen. ".\xe5" would do it. */
if (base[0] == DELETED_FLAG)
base[0] = 0x05;
/* OK, at this point we know that base is not longer than 8 symbols,
* ext is not longer than 3, base is nonempty, both don't contain
* any bad symbols (lowercase transformed to uppercase).
*/
memset(name_res, ' ', MSDOS_NAME);
memcpy(name_res, base, baselen);
memcpy(name_res + 8, ext, extlen);
*lcase = 0;
if (is_shortname && base_info.valid && ext_info.valid) {
if (vfat_find_form(dir, name_res) == 0)
return -EEXIST;
if (opts->shortname & VFAT_SFN_CREATE_WIN95) {
return (base_info.upper && ext_info.upper);
} else if (opts->shortname & VFAT_SFN_CREATE_WINNT) {
if ((base_info.upper || base_info.lower) &&
(ext_info.upper || ext_info.lower)) {
if (!base_info.upper && base_info.lower)
*lcase |= CASE_LOWER_BASE;
if (!ext_info.upper && ext_info.lower)
*lcase |= CASE_LOWER_EXT;
return 1;
}
return 0;
} else {
BUG();
}
}
if (opts->numtail == 0)
if (vfat_find_form(dir, name_res) < 0)
return 0;
/*
* Try to find a unique extension. This used to
* iterate through all possibilities sequentially,
* but that gave extremely bad performance. Windows
* only tries a few cases before using random
* values for part of the base.
*/
if (baselen > 6) {
baselen = numtail_baselen;
name_res[7] = ' ';
}
name_res[baselen] = '~';
for (i = 1; i < 10; i++) {
name_res[baselen + 1] = i + '0';
if (vfat_find_form(dir, name_res) < 0)
return 0;
}
i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
name_res[7] = ' ';
}
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
i -= 11;
}
return 0;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,785 | static int vfat_d_anon_disconn(struct dentry *dentry)
{
return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
}
| DoS Overflow +Priv | 0 | static int vfat_d_anon_disconn(struct dentry *dentry)
{
return IS_ROOT(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,786 | static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
return fat_fill_super(sb, data, silent, 1, setup);
}
| DoS Overflow +Priv | 0 | static int vfat_fill_super(struct super_block *sb, void *data, int silent)
{
return fat_fill_super(sb, data, silent, 1, setup);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,787 | static int vfat_find(struct inode *dir, struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
return fat_search_long(dir, qname->name, len, sinfo);
}
| DoS Overflow +Priv | 0 | static int vfat_find(struct inode *dir, struct qstr *qname,
struct fat_slot_info *sinfo)
{
unsigned int len = vfat_striptail_len(qname);
if (len == 0)
return -ENOENT;
return fat_search_long(dir, qname->name, len, sinfo);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,788 | static int vfat_find_form(struct inode *dir, unsigned char *name)
{
struct fat_slot_info sinfo;
int err = fat_scan(dir, name, &sinfo);
if (err)
return -ENOENT;
brelse(sinfo.bh);
return 0;
}
| DoS Overflow +Priv | 0 | static int vfat_find_form(struct inode *dir, unsigned char *name)
{
struct fat_slot_info sinfo;
int err = fat_scan(dir, name, &sinfo);
if (err)
return -ENOENT;
brelse(sinfo.bh);
return 0;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,789 | static int vfat_hashi(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
const unsigned char *name;
unsigned int len;
unsigned long hash;
name = qstr->name;
len = vfat_striptail_len(qstr);
hash = init_name_hash();
while (len--)
hash = partial_name_hash(nls_tolower(t, *name++), hash);
qstr->hash = end_name_hash(hash);
return 0;
}
| DoS Overflow +Priv | 0 | static int vfat_hashi(const struct dentry *dentry, const struct inode *inode,
struct qstr *qstr)
{
struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
const unsigned char *name;
unsigned int len;
unsigned long hash;
name = qstr->name;
len = vfat_striptail_len(qstr);
hash = init_name_hash();
while (len--)
hash = partial_name_hash(nls_tolower(t, *name++), hash);
qstr->hash = end_name_hash(hash);
return 0;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,790 | static inline int vfat_is_used_badchars(const wchar_t *s, int len)
{
int i;
for (i = 0; i < len; i++)
if (vfat_bad_char(s[i]))
return -EINVAL;
if (s[i - 1] == ' ') /* last character cannot be space */
return -EINVAL;
return 0;
}
| DoS Overflow +Priv | 0 | static inline int vfat_is_used_badchars(const wchar_t *s, int len)
{
int i;
for (i = 0; i < len; i++)
if (vfat_bad_char(s[i]))
return -EINVAL;
if (s[i - 1] == ' ') /* last character cannot be space */
return -EINVAL;
return 0;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,791 | static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
struct dentry *alias;
int err;
lock_super(sb);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
if (err == -ENOENT) {
inode = NULL;
goto out;
}
goto error;
}
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
alias = d_find_alias(inode);
if (alias && !vfat_d_anon_disconn(alias)) {
/*
* This inode has non anonymous-DCACHE_DISCONNECTED
* dentry. This means, the user did ->lookup() by an
* another name (longname vs 8.3 alias of it) in past.
*
* Switch to new one for reason of locality if possible.
*/
BUG_ON(d_unhashed(alias));
if (!S_ISDIR(inode->i_mode))
d_move(alias, dentry);
iput(inode);
unlock_super(sb);
return alias;
} else
dput(alias);
out:
unlock_super(sb);
dentry->d_time = dentry->d_parent->d_inode->i_version;
dentry = d_splice_alias(inode, dentry);
if (dentry)
dentry->d_time = dentry->d_parent->d_inode->i_version;
return dentry;
error:
unlock_super(sb);
return ERR_PTR(err);
}
| DoS Overflow +Priv | 0 | static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
struct nameidata *nd)
{
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
struct inode *inode;
struct dentry *alias;
int err;
lock_super(sb);
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err) {
if (err == -ENOENT) {
inode = NULL;
goto out;
}
goto error;
}
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
goto error;
}
alias = d_find_alias(inode);
if (alias && !vfat_d_anon_disconn(alias)) {
/*
* This inode has non anonymous-DCACHE_DISCONNECTED
* dentry. This means, the user did ->lookup() by an
* another name (longname vs 8.3 alias of it) in past.
*
* Switch to new one for reason of locality if possible.
*/
BUG_ON(d_unhashed(alias));
if (!S_ISDIR(inode->i_mode))
d_move(alias, dentry);
iput(inode);
unlock_super(sb);
return alias;
} else
dput(alias);
out:
unlock_super(sb);
dentry->d_time = dentry->d_parent->d_inode->i_version;
dentry = d_splice_alias(inode, dentry);
if (dentry)
dentry->d_time = dentry->d_parent->d_inode->i_version;
return dentry;
error:
unlock_super(sb);
return ERR_PTR(err);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,792 | static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec ts;
int err, cluster;
lock_super(sb);
ts = CURRENT_TIME_SEC;
cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
dir->i_version++;
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
/* the directory was completed, just return a error */
goto out;
}
inode->i_version++;
set_nlink(inode, 2);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
unlock_super(sb);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
unlock_super(sb);
return err;
}
| DoS Overflow +Priv | 0 | static int vfat_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
struct super_block *sb = dir->i_sb;
struct inode *inode;
struct fat_slot_info sinfo;
struct timespec ts;
int err, cluster;
lock_super(sb);
ts = CURRENT_TIME_SEC;
cluster = fat_alloc_new_dir(dir, &ts);
if (cluster < 0) {
err = cluster;
goto out;
}
err = vfat_add_entry(dir, &dentry->d_name, 1, cluster, &ts, &sinfo);
if (err)
goto out_free;
dir->i_version++;
inc_nlink(dir);
inode = fat_build_inode(sb, sinfo.de, sinfo.i_pos);
brelse(sinfo.bh);
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
/* the directory was completed, just return a error */
goto out;
}
inode->i_version++;
set_nlink(inode, 2);
inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
/* timestamp is already written, so mark_inode_dirty() is unneeded. */
dentry->d_time = dentry->d_parent->d_inode->i_version;
d_instantiate(dentry, inode);
unlock_super(sb);
return 0;
out_free:
fat_free_clusters(dir, cluster);
out:
unlock_super(sb);
return err;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,793 | static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
| DoS Overflow +Priv | 0 | static struct dentry *vfat_mount(struct file_system_type *fs_type,
int flags, const char *dev_name,
void *data)
{
return mount_bdev(fs_type, flags, dev_name, data, vfat_fill_super);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,794 | static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec ts;
loff_t dotdot_i_pos, new_i_pos;
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
lock_super(sb);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err)
goto out;
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
&dotdot_i_pos) < 0) {
err = -EIO;
goto out;
}
}
ts = CURRENT_TIME_SEC;
if (new_inode) {
if (is_dir) {
err = fat_dir_empty(new_inode);
if (err)
goto out;
}
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
&ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
}
new_dir->i_version++;
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
if (IS_DIRSYNC(new_dir)) {
err = fat_sync_inode(old_inode);
if (err)
goto error_inode;
} else
mark_inode_dirty(old_inode);
if (update_dotdot) {
int start = MSDOS_I(new_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
if (err)
goto error_dotdot;
}
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
}
err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
old_dir->i_version++;
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
mark_inode_dirty(old_dir);
if (new_inode) {
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
new_inode->i_ctime = ts;
}
out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
unlock_super(sb);
return err;
error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
if (update_dotdot) {
int start = MSDOS_I(old_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
error_inode:
fat_detach(old_inode);
fat_attach(old_inode, old_sinfo.i_pos);
if (new_inode) {
fat_attach(new_inode, new_i_pos);
if (corrupt)
corrupt |= fat_sync_inode(new_inode);
} else {
/*
* If new entry was not sharing the data cluster, it
* shouldn't be serious corruption.
*/
int err2 = fat_remove_entries(new_dir, &sinfo);
if (corrupt)
corrupt |= err2;
sinfo.bh = NULL;
}
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
}
goto out;
}
| DoS Overflow +Priv | 0 | static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct buffer_head *dotdot_bh;
struct msdos_dir_entry *dotdot_de;
struct inode *old_inode, *new_inode;
struct fat_slot_info old_sinfo, sinfo;
struct timespec ts;
loff_t dotdot_i_pos, new_i_pos;
int err, is_dir, update_dotdot, corrupt = 0;
struct super_block *sb = old_dir->i_sb;
old_sinfo.bh = sinfo.bh = dotdot_bh = NULL;
old_inode = old_dentry->d_inode;
new_inode = new_dentry->d_inode;
lock_super(sb);
err = vfat_find(old_dir, &old_dentry->d_name, &old_sinfo);
if (err)
goto out;
is_dir = S_ISDIR(old_inode->i_mode);
update_dotdot = (is_dir && old_dir != new_dir);
if (update_dotdot) {
if (fat_get_dotdot_entry(old_inode, &dotdot_bh, &dotdot_de,
&dotdot_i_pos) < 0) {
err = -EIO;
goto out;
}
}
ts = CURRENT_TIME_SEC;
if (new_inode) {
if (is_dir) {
err = fat_dir_empty(new_inode);
if (err)
goto out;
}
new_i_pos = MSDOS_I(new_inode)->i_pos;
fat_detach(new_inode);
} else {
err = vfat_add_entry(new_dir, &new_dentry->d_name, is_dir, 0,
&ts, &sinfo);
if (err)
goto out;
new_i_pos = sinfo.i_pos;
}
new_dir->i_version++;
fat_detach(old_inode);
fat_attach(old_inode, new_i_pos);
if (IS_DIRSYNC(new_dir)) {
err = fat_sync_inode(old_inode);
if (err)
goto error_inode;
} else
mark_inode_dirty(old_inode);
if (update_dotdot) {
int start = MSDOS_I(new_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
if (IS_DIRSYNC(new_dir)) {
err = sync_dirty_buffer(dotdot_bh);
if (err)
goto error_dotdot;
}
drop_nlink(old_dir);
if (!new_inode)
inc_nlink(new_dir);
}
err = fat_remove_entries(old_dir, &old_sinfo); /* and releases bh */
old_sinfo.bh = NULL;
if (err)
goto error_dotdot;
old_dir->i_version++;
old_dir->i_ctime = old_dir->i_mtime = ts;
if (IS_DIRSYNC(old_dir))
(void)fat_sync_inode(old_dir);
else
mark_inode_dirty(old_dir);
if (new_inode) {
drop_nlink(new_inode);
if (is_dir)
drop_nlink(new_inode);
new_inode->i_ctime = ts;
}
out:
brelse(sinfo.bh);
brelse(dotdot_bh);
brelse(old_sinfo.bh);
unlock_super(sb);
return err;
error_dotdot:
/* data cluster is shared, serious corruption */
corrupt = 1;
if (update_dotdot) {
int start = MSDOS_I(old_dir)->i_logstart;
dotdot_de->start = cpu_to_le16(start);
dotdot_de->starthi = cpu_to_le16(start >> 16);
mark_buffer_dirty_inode(dotdot_bh, old_inode);
corrupt |= sync_dirty_buffer(dotdot_bh);
}
error_inode:
fat_detach(old_inode);
fat_attach(old_inode, old_sinfo.i_pos);
if (new_inode) {
fat_attach(new_inode, new_i_pos);
if (corrupt)
corrupt |= fat_sync_inode(new_inode);
} else {
/*
* If new entry was not sharing the data cluster, it
* shouldn't be serious corruption.
*/
int err2 = fat_remove_entries(new_dir, &sinfo);
if (corrupt)
corrupt |= err2;
sinfo.bh = NULL;
}
if (corrupt < 0) {
fat_fs_error(new_dir->i_sb,
"%s: Filesystem corrupted (i_pos %lld)",
__func__, sinfo.i_pos);
}
goto out;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,795 | static inline wchar_t vfat_replace_char(wchar_t w)
{
return (w == '[') || (w == ']') || (w == ';') || (w == ',')
|| (w == '+') || (w == '=');
}
| DoS Overflow +Priv | 0 | static inline wchar_t vfat_replace_char(wchar_t w)
{
return (w == '[') || (w == ']') || (w == ';') || (w == ',')
|| (w == '+') || (w == '=');
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,796 | static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/* This is not negative dentry. Always valid. */
if (dentry->d_inode)
return 1;
return vfat_revalidate_shortname(dentry);
}
| DoS Overflow +Priv | 0 | static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/* This is not negative dentry. Always valid. */
if (dentry->d_inode)
return 1;
return vfat_revalidate_shortname(dentry);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,797 | static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/*
* This is not negative dentry. Always valid.
*
* Note, rename() to existing directory entry will have ->d_inode,
* and will use existing name which isn't specified name by user.
*
* We may be able to drop this positive dentry here. But dropping
* positive dentry isn't good idea. So it's unsupported like
* rename("filename", "FILENAME") for now.
*/
if (dentry->d_inode)
return 1;
/*
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
if (!nd)
return 0;
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return vfat_revalidate_shortname(dentry);
}
| DoS Overflow +Priv | 0 | static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
{
if (nd && nd->flags & LOOKUP_RCU)
return -ECHILD;
/*
* This is not negative dentry. Always valid.
*
* Note, rename() to existing directory entry will have ->d_inode,
* and will use existing name which isn't specified name by user.
*
* We may be able to drop this positive dentry here. But dropping
* positive dentry isn't good idea. So it's unsupported like
* rename("filename", "FILENAME") for now.
*/
if (dentry->d_inode)
return 1;
/*
* This may be nfsd (or something), anyway, we can't see the
* intent of this. So, since this can be for creation, drop it.
*/
if (!nd)
return 0;
/*
* Drop the negative dentry, in order to make sure to use the
* case sensitive name which is specified by user if this is
* for creation.
*/
if (nd->flags & (LOOKUP_CREATE | LOOKUP_RENAME_TARGET))
return 0;
return vfat_revalidate_shortname(dentry);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,798 | static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
lock_super(sb);
err = fat_dir_empty(inode);
if (err)
goto out;
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
drop_nlink(dir);
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
unlock_super(sb);
return err;
}
| DoS Overflow +Priv | 0 | static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
struct super_block *sb = dir->i_sb;
struct fat_slot_info sinfo;
int err;
lock_super(sb);
err = fat_dir_empty(inode);
if (err)
goto out;
err = vfat_find(dir, &dentry->d_name, &sinfo);
if (err)
goto out;
err = fat_remove_entries(dir, &sinfo); /* and releases bh */
if (err)
goto out;
drop_nlink(dir);
clear_nlink(inode);
inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
fat_detach(inode);
out:
unlock_super(sb);
return err;
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
26,799 | static unsigned int vfat_striptail_len(const struct qstr *qstr)
{
return __vfat_striptail_len(qstr->len, qstr->name);
}
| DoS Overflow +Priv | 0 | static unsigned int vfat_striptail_len(const struct qstr *qstr)
{
return __vfat_striptail_len(qstr->len, qstr->name);
}
| @@ -512,7 +512,8 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
int charlen;
if (utf8) {
- *outlen = utf8s_to_utf16s(name, len, (wchar_t *)outname);
+ *outlen = utf8s_to_utf16s(name, len, UTF16_HOST_ENDIAN,
+ (wchar_t *) outname, FAT_LFN_LEN + 2);
if (*outlen < 0)
return *outlen;
else if (*outlen > FAT_LFN_LEN) | CWE-119 | null | null |
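Note: the vfat rows above all repeat the same CWE-119 patch to xlate_to_uni(), which bounds the UTF-16 conversion to FAT_LFN_LEN + 2 code units and, in the surrounding context, rejects names that come back longer than FAT_LFN_LEN. A hedged sketch of that validate-after-convert step follows; the converter and error value are illustrative stand-ins for the kernel symbols.

```c
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define FAT_LFN_LEN 255			/* FAT long-name limit used by the vfat code */

/*
 * Stand-in converter: pretend every input byte became one UTF-16 unit.
 * The real code calls the bounded utf8s_to_utf16s() shown in the patch.
 */
static int convert_name_stub(const char *name, int len)
{
	(void)name;
	return len;
}

/* Sketch of the validate-after-convert pattern from the patch context. */
static int xlate_check_sketch(const char *name, int len, int *outlen)
{
	*outlen = convert_name_stub(name, len);
	if (*outlen < 0)
		return *outlen;		/* propagate conversion errors */
	else if (*outlen > FAT_LFN_LEN)
		return -ENAMETOOLONG;	/* name longer than a FAT long name allows */
	return 0;
}

int main(void)
{
	int outlen;
	char big[300];

	memset(big, 'a', sizeof(big));
	printf("short: %d\n", xlate_check_sketch("file.txt", 8, &outlen));
	printf("long:  %d\n", xlate_check_sketch(big, (int)sizeof(big), &outlen));
	return 0;
}
```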