idx: int64
func_before: string
Vulnerability Classification: string
vul: int64
func_after: string
patch: string
CWE ID: string
lines_before: string
lines_after: string
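Read as a flat record, the schema above maps onto a simple struct. The following is a minimal illustrative sketch in C (the struct and field names are hypothetical, not part of the dataset or its tooling); the last three fields are nullable, as the rows below show.

/* Hypothetical record layout mirroring the dataset schema above.
 * Illustrative sketch only; the dataset does not ship a C definition. */
#include <stdint.h>

struct vuln_row {
	int64_t     idx;                  /* idx: int64 */
	const char *func_before;          /* func_before: string (pre-patch function) */
	const char *vuln_classification;  /* Vulnerability Classification: string */
	int64_t     vul;                  /* vul: int64 (0 = not vulnerable) */
	const char *func_after;           /* func_after: string (post-patch function) */
	const char *patch;                /* patch: string (unified diff) */
	const char *cwe_id;               /* CWE ID: string, NULL when absent */
	const char *lines_before;         /* lines_before: string, NULL when absent */
	const char *lines_after;          /* lines_after: string, NULL when absent */
};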
18100
static int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; unsigned long en_flags = ENQUEUE_WAKEUP; struct rq *rq; this_cpu = get_cpu(); smp_wmb(); rq = task_rq_lock(p, &flags); if (!(p->state & state)) goto out; if (p->se.on_rq) goto out_running; cpu = task_cpu(p); orig_cpu = cpu; #ifdef CONFIG_SMP if (unlikely(task_running(rq, p))) goto out_activate; /* * In order to handle concurrent wakeups and release the rq->lock * we put the task in TASK_WAKING state. * * First fix up the nr_uninterruptible count: */ if (task_contributes_to_load(p)) { if (likely(cpu_online(orig_cpu))) rq->nr_uninterruptible--; else this_rq()->nr_uninterruptible--; } p->state = TASK_WAKING; if (p->sched_class->task_waking) { p->sched_class->task_waking(rq, p); en_flags |= ENQUEUE_WAKING; } cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); if (cpu != orig_cpu) set_task_cpu(p, cpu); __task_rq_unlock(rq); rq = cpu_rq(cpu); raw_spin_lock(&rq->lock); /* * We migrated the task without holding either rq->lock, however * since the task is not on the task list itself, nobody else * will try and migrate the task, hence the rq should match the * cpu we just moved it to. */ WARN_ON(task_cpu(p) != cpu); WARN_ON(p->state != TASK_WAKING); #ifdef CONFIG_SCHEDSTATS schedstat_inc(rq, ttwu_count); if (cpu == this_cpu) schedstat_inc(rq, ttwu_local); else { struct sched_domain *sd; for_each_domain(this_cpu, sd) { if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { schedstat_inc(sd, ttwu_wake_remote); break; } } } #endif /* CONFIG_SCHEDSTATS */ out_activate: #endif /* CONFIG_SMP */ ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, cpu == this_cpu, en_flags); success = 1; out_running: ttwu_post_activation(p, rq, wake_flags, success); out: task_rq_unlock(rq, &flags); put_cpu(); return success; }
DoS Exec Code
0
static int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) { int cpu, orig_cpu, this_cpu, success = 0; unsigned long flags; unsigned long en_flags = ENQUEUE_WAKEUP; struct rq *rq; this_cpu = get_cpu(); smp_wmb(); rq = task_rq_lock(p, &flags); if (!(p->state & state)) goto out; if (p->se.on_rq) goto out_running; cpu = task_cpu(p); orig_cpu = cpu; #ifdef CONFIG_SMP if (unlikely(task_running(rq, p))) goto out_activate; /* * In order to handle concurrent wakeups and release the rq->lock * we put the task in TASK_WAKING state. * * First fix up the nr_uninterruptible count: */ if (task_contributes_to_load(p)) { if (likely(cpu_online(orig_cpu))) rq->nr_uninterruptible--; else this_rq()->nr_uninterruptible--; } p->state = TASK_WAKING; if (p->sched_class->task_waking) { p->sched_class->task_waking(rq, p); en_flags |= ENQUEUE_WAKING; } cpu = select_task_rq(rq, p, SD_BALANCE_WAKE, wake_flags); if (cpu != orig_cpu) set_task_cpu(p, cpu); __task_rq_unlock(rq); rq = cpu_rq(cpu); raw_spin_lock(&rq->lock); /* * We migrated the task without holding either rq->lock, however * since the task is not on the task list itself, nobody else * will try and migrate the task, hence the rq should match the * cpu we just moved it to. */ WARN_ON(task_cpu(p) != cpu); WARN_ON(p->state != TASK_WAKING); #ifdef CONFIG_SCHEDSTATS schedstat_inc(rq, ttwu_count); if (cpu == this_cpu) schedstat_inc(rq, ttwu_local); else { struct sched_domain *sd; for_each_domain(this_cpu, sd) { if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { schedstat_inc(sd, ttwu_wake_remote); break; } } } #endif /* CONFIG_SCHEDSTATS */ out_activate: #endif /* CONFIG_SMP */ ttwu_activate(p, rq, wake_flags & WF_SYNC, orig_cpu != cpu, cpu == this_cpu, en_flags); success = 1; out_running: ttwu_post_activation(p, rq, wake_flags, success); out: task_rq_unlock(rq, &flags); put_cpu(); return success; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18101
bool try_wait_for_completion(struct completion *x) { unsigned long flags; int ret = 1; spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; spin_unlock_irqrestore(&x->wait.lock, flags); return ret; }
DoS Exec Code
0
bool try_wait_for_completion(struct completion *x) { unsigned long flags; int ret = 1; spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; spin_unlock_irqrestore(&x->wait.lock, flags); return ret; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18102
static inline void ttwu_activate(struct task_struct *p, struct rq *rq, bool is_sync, bool is_migrate, bool is_local, unsigned long en_flags) { schedstat_inc(p, se.statistics.nr_wakeups); if (is_sync) schedstat_inc(p, se.statistics.nr_wakeups_sync); if (is_migrate) schedstat_inc(p, se.statistics.nr_wakeups_migrate); if (is_local) schedstat_inc(p, se.statistics.nr_wakeups_local); else schedstat_inc(p, se.statistics.nr_wakeups_remote); activate_task(rq, p, en_flags); }
DoS Exec Code
0
static inline void ttwu_activate(struct task_struct *p, struct rq *rq, bool is_sync, bool is_migrate, bool is_local, unsigned long en_flags) { schedstat_inc(p, se.statistics.nr_wakeups); if (is_sync) schedstat_inc(p, se.statistics.nr_wakeups_sync); if (is_migrate) schedstat_inc(p, se.statistics.nr_wakeups_migrate); if (is_local) schedstat_inc(p, se.statistics.nr_wakeups_local); else schedstat_inc(p, se.statistics.nr_wakeups_remote); activate_task(rq, p, en_flags); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18103
static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, int wake_flags, bool success) { trace_sched_wakeup(p, success); check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); if (unlikely(rq->idle_stamp)) { u64 delta = rq->clock - rq->idle_stamp; u64 max = 2*sysctl_sched_migration_cost; if (delta > max) rq->avg_idle = max; else update_avg(&rq->avg_idle, delta); rq->idle_stamp = 0; } #endif /* if a worker is waking up, notify workqueue */ if ((p->flags & PF_WQ_WORKER) && success) wq_worker_waking_up(p, cpu_of(rq)); }
DoS Exec Code
0
static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, int wake_flags, bool success) { trace_sched_wakeup(p, success); check_preempt_curr(rq, p, wake_flags); p->state = TASK_RUNNING; #ifdef CONFIG_SMP if (p->sched_class->task_woken) p->sched_class->task_woken(rq, p); if (unlikely(rq->idle_stamp)) { u64 delta = rq->clock - rq->idle_stamp; u64 max = 2*sysctl_sched_migration_cost; if (delta > max) rq->avg_idle = max; else update_avg(&rq->avg_idle, delta); rq->idle_stamp = 0; } #endif /* if a worker is waking up, notify workqueue */ if ((p->flags & PF_WQ_WORKER) && success) wq_worker_waking_up(p, cpu_of(rq)); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18104
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); }
DoS Exec Code
0
static inline void unregister_fair_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18105
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); }
DoS Exec Code
0
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18106
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { }
DoS Exec Code
0
static inline void unregister_rt_sched_group(struct task_group *tg, int cpu) { }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18107
static void unregister_sched_domain_sysctl(void) { if (sd_sysctl_header) unregister_sysctl_table(sd_sysctl_header); sd_sysctl_header = NULL; if (sd_ctl_dir[0].child) sd_free_ctl_entry(&sd_ctl_dir[0].child); }
DoS Exec Code
0
static void unregister_sched_domain_sysctl(void) { if (sd_sysctl_header) unregister_sysctl_table(sd_sysctl_header); sd_sysctl_header = NULL; if (sd_ctl_dir[0].child) sd_free_ctl_entry(&sd_ctl_dir[0].child); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18108
static void unregister_sched_domain_sysctl(void) { }
DoS Exec Code
0
static void unregister_sched_domain_sysctl(void) { }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18109
static void update_cpu_load(struct rq *this_rq) { unsigned long this_load = this_rq->load.weight; unsigned long curr_jiffies = jiffies; unsigned long pending_updates; int i, scale; this_rq->nr_load_updates++; /* Avoid repeated calls on same jiffy, when moving in and out of idle */ if (curr_jiffies == this_rq->last_load_update_tick) return; pending_updates = curr_jiffies - this_rq->last_load_update_tick; this_rq->last_load_update_tick = curr_jiffies; /* Update our load: */ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; old_load = decay_load_missed(old_load, pending_updates - 1, i); new_load = this_load; /* * Round up the averaging division if load is increasing. This * prevents us from getting stuck on 9 if the load is 10, for * example. */ if (new_load > old_load) new_load += scale - 1; this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } sched_avg_update(this_rq); }
DoS Exec Code
0
static void update_cpu_load(struct rq *this_rq) { unsigned long this_load = this_rq->load.weight; unsigned long curr_jiffies = jiffies; unsigned long pending_updates; int i, scale; this_rq->nr_load_updates++; /* Avoid repeated calls on same jiffy, when moving in and out of idle */ if (curr_jiffies == this_rq->last_load_update_tick) return; pending_updates = curr_jiffies - this_rq->last_load_update_tick; this_rq->last_load_update_tick = curr_jiffies; /* Update our load: */ this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */ for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) { unsigned long old_load, new_load; /* scale is effectively 1 << i now, and >> i divides by scale */ old_load = this_rq->cpu_load[i]; old_load = decay_load_missed(old_load, pending_updates - 1, i); new_load = this_load; /* * Round up the averaging division if load is increasing. This * prevents us from getting stuck on 9 if the load is 10, for * example. */ if (new_load > old_load) new_load += scale - 1; this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i; } sched_avg_update(this_rq); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18110
static void update_cpu_load_active(struct rq *this_rq) { update_cpu_load(this_rq); calc_load_account_active(this_rq); }
DoS Exec Code
0
static void update_cpu_load_active(struct rq *this_rq) { update_cpu_load(this_rq); calc_load_account_active(this_rq); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18111
static void update_group_shares_cpu(struct task_group *tg, int cpu, unsigned long sd_shares, unsigned long sd_rq_weight, unsigned long *usd_rq_weight) { unsigned long shares, rq_weight; int boost = 0; rq_weight = usd_rq_weight[cpu]; if (!rq_weight) { boost = 1; rq_weight = NICE_0_LOAD; } /* * \Sum_j shares_j * rq_weight_i * shares_i = ----------------------------- * \Sum_j rq_weight_j */ shares = (sd_shares * rq_weight) / sd_rq_weight; shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); if (abs(shares - tg->se[cpu]->load.weight) > sysctl_sched_shares_thresh) { struct rq *rq = cpu_rq(cpu); unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; tg->cfs_rq[cpu]->shares = boost ? 0 : shares; __set_se_shares(tg->se[cpu], shares); raw_spin_unlock_irqrestore(&rq->lock, flags); } }
DoS Exec Code
0
static void update_group_shares_cpu(struct task_group *tg, int cpu, unsigned long sd_shares, unsigned long sd_rq_weight, unsigned long *usd_rq_weight) { unsigned long shares, rq_weight; int boost = 0; rq_weight = usd_rq_weight[cpu]; if (!rq_weight) { boost = 1; rq_weight = NICE_0_LOAD; } /* * \Sum_j shares_j * rq_weight_i * shares_i = ----------------------------- * \Sum_j rq_weight_j */ shares = (sd_shares * rq_weight) / sd_rq_weight; shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); if (abs(shares - tg->se[cpu]->load.weight) > sysctl_sched_shares_thresh) { struct rq *rq = cpu_rq(cpu); unsigned long flags; raw_spin_lock_irqsave(&rq->lock, flags); tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight; tg->cfs_rq[cpu]->shares = boost ? 0 : shares; __set_se_shares(tg->se[cpu], shares); raw_spin_unlock_irqrestore(&rq->lock, flags); } }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18112
static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; lw->inv_weight = 0; }
DoS Exec Code
0
static inline void update_load_add(struct load_weight *lw, unsigned long inc) { lw->weight += inc; lw->inv_weight = 0; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18113
static inline void update_load_sub(struct load_weight *lw, unsigned long dec) { lw->weight -= dec; lw->inv_weight = 0; }
DoS Exec Code
0
static inline void update_load_sub(struct load_weight *lw, unsigned long dec) { lw->weight -= dec; lw->inv_weight = 0; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18114
static void update_shares(struct sched_domain *sd) { s64 elapsed; u64 now; if (root_task_group_empty()) return; now = local_clock(); elapsed = now - sd->last_update; if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { sd->last_update = now; walk_tg_tree(tg_nop, tg_shares_up, sd); } }
DoS Exec Code
0
static void update_shares(struct sched_domain *sd) { s64 elapsed; u64 now; if (root_task_group_empty()) return; now = local_clock(); elapsed = now - sd->last_update; if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { sd->last_update = now; walk_tg_tree(tg_nop, tg_shares_up, sd); } }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18115
static inline void update_shares(struct sched_domain *sd) { }
DoS Exec Code
0
static inline void update_shares(struct sched_domain *sd) { }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18116
static void update_sysctl(void) { unsigned int factor = get_update_sysctl_factor(); #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) SET_SYSCTL(sched_min_granularity); SET_SYSCTL(sched_latency); SET_SYSCTL(sched_wakeup_granularity); SET_SYSCTL(sched_shares_ratelimit); #undef SET_SYSCTL }
DoS Exec Code
0
static void update_sysctl(void) { unsigned int factor = get_update_sysctl_factor(); #define SET_SYSCTL(name) \ (sysctl_##name = (factor) * normalized_sysctl_##name) SET_SYSCTL(sched_min_granularity); SET_SYSCTL(sched_latency); SET_SYSCTL(sched_wakeup_granularity); SET_SYSCTL(sched_shares_ratelimit); #undef SET_SYSCTL }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18117
wait_for_common(struct completion *x, long timeout, int state) { might_sleep(); spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, timeout, state); spin_unlock_irq(&x->wait.lock); return timeout; }
DoS Exec Code
0
wait_for_common(struct completion *x, long timeout, int state) { might_sleep(); spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, timeout, state); spin_unlock_irq(&x->wait.lock); return timeout; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18118
int __sched wait_for_completion_interruptible(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); if (t == -ERESTARTSYS) return t; return 0; }
DoS Exec Code
0
int __sched wait_for_completion_interruptible(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); if (t == -ERESTARTSYS) return t; return 0; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18119
wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); }
DoS Exec Code
0
wait_for_completion_interruptible_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_INTERRUPTIBLE); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18120
int __sched wait_for_completion_killable(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); if (t == -ERESTARTSYS) return t; return 0; }
DoS Exec Code
0
int __sched wait_for_completion_killable(struct completion *x) { long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); if (t == -ERESTARTSYS) return t; return 0; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18121
wait_for_completion_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); }
DoS Exec Code
0
wait_for_completion_timeout(struct completion *x, unsigned long timeout) { return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,122
void wake_up_idle_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); if (cpu == smp_processor_id()) return; /* * This is safe, as this function is called with the timer * wheel base lock of (cpu) held. When the CPU is on the way * to idle and has not yet set rq->curr to idle then it will * be serialized on the timer wheel base lock and take the new * timer into account automatically. */ if (rq->curr != rq->idle) return; /* * We can set TIF_RESCHED on the idle task of the other CPU * lockless. The worst case is that the other CPU runs the * idle task through an additional NOOP schedule() */ set_tsk_need_resched(rq->idle); /* NEED_RESCHED must be visible before we test polling */ smp_mb(); if (!tsk_is_polling(rq->idle)) smp_send_reschedule(cpu); }
DoS Exec Code
0
void wake_up_idle_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); if (cpu == smp_processor_id()) return; /* * This is safe, as this function is called with the timer * wheel base lock of (cpu) held. When the CPU is on the way * to idle and has not yet set rq->curr to idle then it will * be serialized on the timer wheel base lock and take the new * timer into account automatically. */ if (rq->curr != rq->idle) return; /* * We can set TIF_RESCHED on the idle task of the other CPU * lockless. The worst case is that the other CPU runs the * idle task through an additional NOOP schedule() */ set_tsk_need_resched(rq->idle); /* NEED_RESCHED must be visible before we test polling */ smp_mb(); if (!tsk_is_polling(rq->idle)) smp_send_reschedule(cpu); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,123
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;
	int cpu __maybe_unused = get_cpu();

#ifdef CONFIG_SMP
	rq = task_rq_lock(p, &flags);
	p->state = TASK_WAKING;

	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 *
	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
	 * without people poking at ->cpus_allowed.
	 */
	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);

	p->state = TASK_RUNNING;
	task_rq_unlock(rq, &flags);
#endif

	rq = task_rq_lock(p, &flags);
	activate_task(rq, p, 0);
	trace_sched_wakeup_new(p, 1);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, &flags);
	put_cpu();
}
DoS Exec Code
0
void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
	unsigned long flags;
	struct rq *rq;
	int cpu __maybe_unused = get_cpu();

#ifdef CONFIG_SMP
	rq = task_rq_lock(p, &flags);
	p->state = TASK_WAKING;

	/*
	 * Fork balancing, do it here and not earlier because:
	 *  - cpus_allowed can change in the fork path
	 *  - any previously selected cpu might disappear through hotplug
	 *
	 * We set TASK_WAKING so that select_task_rq() can drop rq->lock
	 * without people poking at ->cpus_allowed.
	 */
	cpu = select_task_rq(rq, p, SD_BALANCE_FORK, 0);
	set_task_cpu(p, cpu);

	p->state = TASK_RUNNING;
	task_rq_unlock(rq, &flags);
#endif

	rq = task_rq_lock(p, &flags);
	activate_task(rq, p, 0);
	trace_sched_wakeup_new(p, 1);
	check_preempt_curr(rq, p, WF_FORK);
#ifdef CONFIG_SMP
	if (p->sched_class->task_woken)
		p->sched_class->task_woken(rq, p);
#endif
	task_rq_unlock(rq, &flags);
	put_cpu();
}
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,124
int wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_ALL, 0); }
DoS Exec Code
0
int wake_up_process(struct task_struct *p) { return try_to_wake_up(p, TASK_ALL, 0); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,125
int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); }
DoS Exec Code
0
int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,126
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; int ret; rcu_read_lock(); parent = &root_task_group; down: ret = (*down)(parent, data); if (ret) goto out_unlock; list_for_each_entry_rcu(child, &parent->children, siblings) { parent = child; goto down; up: continue; } ret = (*up)(parent, data); if (ret) goto out_unlock; child = parent; parent = parent->parent; if (parent) goto up; out_unlock: rcu_read_unlock(); return ret; }
DoS Exec Code
0
static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; int ret; rcu_read_lock(); parent = &root_task_group; down: ret = (*down)(parent, data); if (ret) goto out_unlock; list_for_each_entry_rcu(child, &parent->children, siblings) { parent = child; goto down; up: continue; } ret = (*up)(parent, data); if (ret) goto out_unlock; child = parent; parent = parent->parent; if (parent) goto up; out_unlock: rcu_read_unlock(); return ret; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,127
static unsigned long weighted_cpuload(const int cpu) { return cpu_rq(cpu)->load.weight; }
DoS Exec Code
0
static unsigned long weighted_cpuload(const int cpu) { return cpu_rq(cpu)->load.weight; }
@@ -641,17 +641,18 @@ static void sched_irq_time_avg_update(struct rq *rq, u64 irq_time); inline void update_rq_clock(struct rq *rq) { - if (!rq->skip_clock_update) { - int cpu = cpu_of(rq); - u64 irq_time; + int cpu = cpu_of(rq); + u64 irq_time; - rq->clock = sched_clock_cpu(cpu); - irq_time = irq_time_cpu(cpu); - if (rq->clock - irq_time > rq->clock_task) - rq->clock_task = rq->clock - irq_time; + if (rq->skip_clock_update) + return; - sched_irq_time_avg_update(rq, irq_time); - } + rq->clock = sched_clock_cpu(cpu); + irq_time = irq_time_cpu(cpu); + if (rq->clock - irq_time > rq->clock_task) + rq->clock_task = rq->clock - irq_time; + + sched_irq_time_avg_update(rq, irq_time); } /* @@ -2129,7 +2130,7 @@ static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) * A queue event has occurred, and we're going to schedule. In * this case, we can save a useless back to back clock update. */ - if (test_tsk_need_resched(rq->curr)) + if (rq->curr->se.on_rq && test_tsk_need_resched(rq->curr)) rq->skip_clock_update = 1; } @@ -3973,7 +3974,6 @@ static void put_prev_task(struct rq *rq, struct task_struct *prev) { if (prev->se.on_rq) update_rq_clock(rq); - rq->skip_clock_update = 0; prev->sched_class->put_prev_task(rq, prev); } @@ -4031,7 +4031,6 @@ asmlinkage void __sched schedule(void) hrtick_clear(rq); raw_spin_lock_irq(&rq->lock); - clear_tsk_need_resched(prev); switch_count = &prev->nivcsw; if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) { @@ -4063,6 +4062,8 @@ asmlinkage void __sched schedule(void) put_prev_task(rq, prev); next = pick_next_task(rq); + clear_tsk_need_resched(prev); + rq->skip_clock_update = 0; if (likely(prev != next)) { sched_info_switch(prev, next); @@ -4071,6 +4072,7 @@ asmlinkage void __sched schedule(void) rq->nr_switches++; rq->curr = next; ++*switch_count; + WARN_ON_ONCE(test_tsk_need_resched(next)); context_switch(rq, prev, next); /* unlocks the rq */ /*
null
null
null
18,128
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, unsigned int flags) { int n; u64 alt[MAX_EVENT_ALTERNATIVES]; if (event->attr.exclude_user || event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) return 1; /* * The requested event_id isn't on a limited PMC already; * see if any alternative code goes on a limited PMC. */ if (!ppmu->get_alternatives) return 0; flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; n = ppmu->get_alternatives(ev, flags, alt); return n > 0; }
DoS Overflow
0
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev, unsigned int flags) { int n; u64 alt[MAX_EVENT_ALTERNATIVES]; if (event->attr.exclude_user || event->attr.exclude_kernel || event->attr.exclude_hv || event->attr.sample_period) return 0; if (ppmu->limited_pmc_event(ev)) return 1; /* * The requested event_id isn't on a limited PMC already; * see if any alternative code goes on a limited PMC. */ if (!ppmu->get_alternatives) return 0; flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD; n = ppmu->get_alternatives(ev, flags, alt); return n > 0; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,129
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; int i, n, first; struct perf_event *event; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; ++i) { if (cflags[i] & PPMU_LIMITED_PMC_OK) { cflags[i] &= ~PPMU_LIMITED_PMC_REQD; continue; } event = ctrs[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } if (eu || ek || eh) for (i = 0; i < n; ++i) if (cflags[i] & PPMU_LIMITED_PMC_OK) cflags[i] |= PPMU_LIMITED_PMC_REQD; return 0; }
DoS Overflow
0
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], int n_prev, int n_new) { int eu = 0, ek = 0, eh = 0; int i, n, first; struct perf_event *event; n = n_prev + n_new; if (n <= 1) return 0; first = 1; for (i = 0; i < n; ++i) { if (cflags[i] & PPMU_LIMITED_PMC_OK) { cflags[i] &= ~PPMU_LIMITED_PMC_REQD; continue; } event = ctrs[i]; if (first) { eu = event->attr.exclude_user; ek = event->attr.exclude_kernel; eh = event->attr.exclude_hv; first = 0; } else if (event->attr.exclude_user != eu || event->attr.exclude_kernel != ek || event->attr.exclude_hv != eh) { return -EAGAIN; } } if (eu || ek || eh) for (i = 0; i < n; ++i) if (cflags[i] & PPMU_LIMITED_PMC_OK) cflags[i] |= PPMU_LIMITED_PMC_REQD; return 0; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,130
static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[], u64 *events, unsigned int *flags) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; flags[n] = group->hw.event_base; events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; flags[n] = event->hw.event_base; events[n++] = event->hw.config; } } return n; }
DoS Overflow
0
static int collect_events(struct perf_event *group, int max_count, struct perf_event *ctrs[], u64 *events, unsigned int *flags) { int n = 0; struct perf_event *event; if (!is_software_event(group)) { if (n >= max_count) return -1; ctrs[n] = group; flags[n] = group->hw.event_base; events[n++] = group->hw.config; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && event->state != PERF_EVENT_STATE_OFF) { if (n >= max_count) return -1; ctrs[n] = event; flags[n] = event->hw.event_base; events[n++] = event->hw.config; } } return n; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,131
static void freeze_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev, delta; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; if (!event->hw.idx) continue; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); } }
DoS Overflow
0
static void freeze_limited_counters(struct cpu_hw_events *cpuhw, unsigned long pmc5, unsigned long pmc6) { struct perf_event *event; u64 val, prev, delta; int i; for (i = 0; i < cpuhw->n_limited; ++i) { event = cpuhw->limited_counter[i]; if (!event->hw.idx) continue; val = (event->hw.idx == 5) ? pmc5 : pmc6; prev = local64_read(&event->hw.prev_count); event->hw.idx = 0; delta = (val - prev) & 0xfffffffful; local64_add(delta, &event->count); } }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,132
static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; }
DoS Overflow
0
static int hw_perf_cache_event(u64 config, u64 *eventp) { unsigned long type, op, result; int ev; if (!ppmu->cache_events) return -EINVAL; /* unpack config */ type = config & 0xff; op = (config >> 8) & 0xff; result = (config >> 16) & 0xff; if (type >= PERF_COUNT_HW_CACHE_MAX || op >= PERF_COUNT_HW_CACHE_OP_MAX || result >= PERF_COUNT_HW_CACHE_RESULT_MAX) return -EINVAL; ev = (*ppmu->cache_events)[type][op][result]; if (ev == 0) return -EOPNOTSUPP; if (ev == -1) return -EINVAL; *eventp = ev; return 0; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,133
static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); }
DoS Overflow
0
static int is_limited_pmc(int pmcnum) { return (ppmu->flags & PPMU_LIMITED_PMC5_6) && (pmcnum == 5 || pmcnum == 6); }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,134
static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { u64 alt[MAX_EVENT_ALTERNATIVES]; int n; flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); n = ppmu->get_alternatives(ev, flags, alt); if (!n) return 0; return alt[0]; }
DoS Overflow
0
static u64 normal_pmc_alternative(u64 ev, unsigned long flags) { u64 alt[MAX_EVENT_ALTERNATIVES]; int n; flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD); n = ppmu->get_alternatives(ev, flags, alt); if (!n) return 0; return alt[0]; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,135
void perf_event_print_debug(void) { }
DoS Overflow
0
void perf_event_print_debug(void) { }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,136
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
DoS Overflow
0
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,137
static inline u32 perf_get_misc_flags(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; unsigned long sihv = MMCRA_SIHV; unsigned long sipr = MMCRA_SIPR; if (TRAP(regs) != 0xf00) return 0; /* not a PMU interrupt */ if (ppmu->flags & PPMU_ALT_SIPR) { sihv = POWER6_MMCRA_SIHV; sipr = POWER6_MMCRA_SIPR; } /* PR has priority over HV, so order below is important */ if (mmcra & sipr) return PERF_RECORD_MISC_USER; if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) return PERF_RECORD_MISC_HYPERVISOR; return PERF_RECORD_MISC_KERNEL; }
DoS Overflow
0
static inline u32 perf_get_misc_flags(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; unsigned long sihv = MMCRA_SIHV; unsigned long sipr = MMCRA_SIPR; if (TRAP(regs) != 0xf00) return 0; /* not a PMU interrupt */ if (ppmu->flags & PPMU_ALT_SIPR) { sihv = POWER6_MMCRA_SIHV; sipr = POWER6_MMCRA_SIPR; } /* PR has priority over HV, so order below is important */ if (mmcra & sipr) return PERF_RECORD_MISC_USER; if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV)) return PERF_RECORD_MISC_HYPERVISOR; return PERF_RECORD_MISC_KERNEL; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,138
unsigned long perf_instruction_pointer(struct pt_regs *regs) { unsigned long ip; if (TRAP(regs) != 0xf00) return regs->nip; /* not a PMU interrupt */ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); return ip; }
DoS Overflow
0
unsigned long perf_instruction_pointer(struct pt_regs *regs) { unsigned long ip; if (TRAP(regs) != 0xf00) return regs->nip; /* not a PMU interrupt */ ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); return ip; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,139
static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; }
DoS Overflow
0
static inline int perf_intr_is_nmi(struct pt_regs *regs) { return 0; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,140
static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; if (slot > 1) return 4 * (slot - 1); } return 0; }
DoS Overflow
0
static inline unsigned long perf_ip_adjust(struct pt_regs *regs) { unsigned long mmcra = regs->dsisr; if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) { unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT; if (slot > 1) return 4 * (slot - 1); } return 0; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,141
static inline void perf_read_regs(struct pt_regs *regs) { }
DoS Overflow
0
static inline void perf_read_regs(struct pt_regs *regs) { }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,142
static inline void perf_read_regs(struct pt_regs *regs) { regs->dsisr = mfspr(SPRN_MMCRA); }
DoS Overflow
0
static inline void perf_read_regs(struct pt_regs *regs) { regs->dsisr = mfspr(SPRN_MMCRA); }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,143
static int power_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; unsigned long flags; int n0; int ret = -EAGAIN; local_irq_save(flags); perf_pmu_disable(event->pmu); /* * Add the event to the list (if there is room) * and check whether the total set is still feasible. */ cpuhw = &__get_cpu_var(cpu_hw_events); n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; cpuhw->event[n0] = event; cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; if (!(ef_flags & PERF_EF_START)) event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be peformed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) goto nocheck; if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) goto out; event->hw.config = cpuhw->events[n0]; nocheck: ++cpuhw->n_events; ++cpuhw->n_added; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; }
DoS Overflow
0
static int power_pmu_add(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; unsigned long flags; int n0; int ret = -EAGAIN; local_irq_save(flags); perf_pmu_disable(event->pmu); /* * Add the event to the list (if there is room) * and check whether the total set is still feasible. */ cpuhw = &__get_cpu_var(cpu_hw_events); n0 = cpuhw->n_events; if (n0 >= ppmu->n_counter) goto out; cpuhw->event[n0] = event; cpuhw->events[n0] = event->hw.config; cpuhw->flags[n0] = event->hw.event_base; if (!(ef_flags & PERF_EF_START)) event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; /* * If group events scheduling transaction was started, * skip the schedulability test here, it will be peformed * at commit time(->commit_txn) as a whole */ if (cpuhw->group_flag & PERF_EVENT_TXN) goto nocheck; if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1)) goto out; if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1)) goto out; event->hw.config = cpuhw->events[n0]; nocheck: ++cpuhw->n_events; ++cpuhw->n_added; ret = 0; out: perf_pmu_enable(event->pmu); local_irq_restore(flags); return ret; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,144
static void power_pmu_del(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; long i; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); cpuhw = &__get_cpu_var(cpu_hw_events); for (i = 0; i < cpuhw->n_events; ++i) { if (event == cpuhw->event[i]) { while (++i < cpuhw->n_events) { cpuhw->event[i-1] = cpuhw->event[i]; cpuhw->events[i-1] = cpuhw->events[i]; cpuhw->flags[i-1] = cpuhw->flags[i]; } --cpuhw->n_events; ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); if (event->hw.idx) { write_pmc(event->hw.idx, 0); event->hw.idx = 0; } perf_event_update_userpage(event); break; } } for (i = 0; i < cpuhw->n_limited; ++i) if (event == cpuhw->limited_counter[i]) break; if (i < cpuhw->n_limited) { while (++i < cpuhw->n_limited) { cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; } --cpuhw->n_limited; } if (cpuhw->n_events == 0) { /* disable exceptions if no events are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } perf_pmu_enable(event->pmu); local_irq_restore(flags); }
DoS Overflow
0
static void power_pmu_del(struct perf_event *event, int ef_flags) { struct cpu_hw_events *cpuhw; long i; unsigned long flags; local_irq_save(flags); perf_pmu_disable(event->pmu); power_pmu_read(event); cpuhw = &__get_cpu_var(cpu_hw_events); for (i = 0; i < cpuhw->n_events; ++i) { if (event == cpuhw->event[i]) { while (++i < cpuhw->n_events) { cpuhw->event[i-1] = cpuhw->event[i]; cpuhw->events[i-1] = cpuhw->events[i]; cpuhw->flags[i-1] = cpuhw->flags[i]; } --cpuhw->n_events; ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr); if (event->hw.idx) { write_pmc(event->hw.idx, 0); event->hw.idx = 0; } perf_event_update_userpage(event); break; } } for (i = 0; i < cpuhw->n_limited; ++i) if (event == cpuhw->limited_counter[i]) break; if (i < cpuhw->n_limited) { while (++i < cpuhw->n_limited) { cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i]; cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i]; } --cpuhw->n_limited; } if (cpuhw->n_events == 0) { /* disable exceptions if no events are running */ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); } perf_pmu_enable(event->pmu); local_irq_restore(flags); }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,145
static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; cpuhw->n_added = 0; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } /* * Disable instruction sampling if it was enabled */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); } /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been * executed and the PMU has frozen the events * before we return. */ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); mb(); } local_irq_restore(flags); }
DoS Overflow
0
static void power_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuhw; unsigned long flags; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { cpuhw->disabled = 1; cpuhw->n_added = 0; /* * Check if we ever enabled the PMU on this cpu. */ if (!cpuhw->pmcs_enabled) { ppc_enable_pmcs(); cpuhw->pmcs_enabled = 1; } /* * Disable instruction sampling if it was enabled */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mb(); } /* * Set the 'freeze counters' bit. * The barrier is to make sure the mtspr has been * executed and the PMU has frozen the events * before we return. */ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC); mb(); } local_irq_restore(flags); }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,146
static void power_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; unsigned long flags; long i; unsigned long val; s64 left; unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { local_irq_restore(flags); return; } cpuhw->disabled = 0; /* * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings * (possibly updated for removal of events). */ if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); if (cpuhw->n_events == 0) ppc_set_pmu_inuse(0); goto out_enable; } /* * Compute MMCR* values for the new set of events */ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, cpuhw->mmcr)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; } /* * Add in MMCR0 freeze bits corresponding to the * attr.exclude_* bits for the first event. * We have already checked that all events have the * same values for these bits as the first event. */ event = cpuhw->event[0]; if (event->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; if (event->attr.exclude_kernel) cpuhw->mmcr[0] |= freeze_events_kernel; if (event->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* * Write the new configuration to MMCR* with the freeze * bit set and set the hardware events to their initial values. * Then unfreeze the events. */ ppc_set_pmu_inuse(1); mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); /* * Read off any pre-existing events that need to move * to another PMC. */ for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_read(event); write_pmc(event->hw.idx, 0); event->hw.idx = 0; } } /* * Initialize the PMCs for all the new and moved events. */ cpuhw->n_limited = n_lim = 0; for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx) continue; idx = hwc_index[i] + 1; if (is_limited_pmc(idx)) { cpuhw->limited_counter[n_lim] = event; cpuhw->limited_hwidx[n_lim] = idx; ++n_lim; continue; } val = 0; if (event->hw.sample_period) { left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); event->hw.idx = idx; if (event->hw.state & PERF_HES_STOPPED) val = 0; write_pmc(idx, val); perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: mb(); write_mmcr0(cpuhw, cpuhw->mmcr[0]); /* * Enable instruction sampling if necessary */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mb(); mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); } out: local_irq_restore(flags); }
DoS Overflow
0
static void power_pmu_enable(struct pmu *pmu) { struct perf_event *event; struct cpu_hw_events *cpuhw; unsigned long flags; long i; unsigned long val; s64 left; unsigned int hwc_index[MAX_HWEVENTS]; int n_lim; int idx; if (!ppmu) return; local_irq_save(flags); cpuhw = &__get_cpu_var(cpu_hw_events); if (!cpuhw->disabled) { local_irq_restore(flags); return; } cpuhw->disabled = 0; /* * If we didn't change anything, or only removed events, * no need to recalculate MMCR* settings and reset the PMCs. * Just reenable the PMU with the current MMCR* settings * (possibly updated for removal of events). */ if (!cpuhw->n_added) { mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); if (cpuhw->n_events == 0) ppc_set_pmu_inuse(0); goto out_enable; } /* * Compute MMCR* values for the new set of events */ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index, cpuhw->mmcr)) { /* shouldn't ever get here */ printk(KERN_ERR "oops compute_mmcr failed\n"); goto out; } /* * Add in MMCR0 freeze bits corresponding to the * attr.exclude_* bits for the first event. * We have already checked that all events have the * same values for these bits as the first event. */ event = cpuhw->event[0]; if (event->attr.exclude_user) cpuhw->mmcr[0] |= MMCR0_FCP; if (event->attr.exclude_kernel) cpuhw->mmcr[0] |= freeze_events_kernel; if (event->attr.exclude_hv) cpuhw->mmcr[0] |= MMCR0_FCHV; /* * Write the new configuration to MMCR* with the freeze * bit set and set the hardware events to their initial values. * Then unfreeze the events. */ ppc_set_pmu_inuse(1); mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE); mtspr(SPRN_MMCR1, cpuhw->mmcr[1]); mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)) | MMCR0_FC); /* * Read off any pre-existing events that need to move * to another PMC. */ for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) { power_pmu_read(event); write_pmc(event->hw.idx, 0); event->hw.idx = 0; } } /* * Initialize the PMCs for all the new and moved events. */ cpuhw->n_limited = n_lim = 0; for (i = 0; i < cpuhw->n_events; ++i) { event = cpuhw->event[i]; if (event->hw.idx) continue; idx = hwc_index[i] + 1; if (is_limited_pmc(idx)) { cpuhw->limited_counter[n_lim] = event; cpuhw->limited_hwidx[n_lim] = idx; ++n_lim; continue; } val = 0; if (event->hw.sample_period) { left = local64_read(&event->hw.period_left); if (left < 0x80000000L) val = 0x80000000L - left; } local64_set(&event->hw.prev_count, val); event->hw.idx = idx; if (event->hw.state & PERF_HES_STOPPED) val = 0; write_pmc(idx, val); perf_event_update_userpage(event); } cpuhw->n_limited = n_lim; cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE; out_enable: mb(); write_mmcr0(cpuhw, cpuhw->mmcr[0]); /* * Enable instruction sampling if necessary */ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) { mb(); mtspr(SPRN_MMCRA, cpuhw->mmcr[2]); } out: local_irq_restore(flags); }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,147
static int power_pmu_event_init(struct perf_event *event) { u64 ev; unsigned long flags; struct perf_event *ctrs[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int cflags[MAX_HWEVENTS]; int n; int err; struct cpu_hw_events *cpuhw; if (!ppmu) return -ENOENT; switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config_base = ev; event->hw.idx = 0; /* * If we are not running on a hypervisor, force the * exclude_hv bit to 0 so that we don't care what * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) event->attr.exclude_hv = 0; /* * If this is a per-task event, then we can use * PM_RUN_* events interchangeably with their non RUN_* * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. * XXX we should check if the task is an idle task. */ flags = 0; if (event->attach_state & PERF_ATTACH_TASK) flags |= PPMU_ONLY_COUNT_RUN; /* * If this machine has limited events, check whether this * event_id could go on a limited event. */ if (ppmu->flags & PPMU_LIMITED_PMC5_6) { if (can_go_on_limited_pmc(event, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { /* * The requested event_id is on a limited PMC, * but we can't use a limited PMC; see if any * alternative goes on a normal PMC. */ ev = normal_pmc_alternative(ev, flags); if (!ev) return -EINVAL; } } /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. */ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) return -EINVAL; } events[n] = ev; ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) return -EINVAL; cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); put_cpu_var(cpu_hw_events); if (err) return -EINVAL; event->hw.config = events[n]; event->hw.event_base = cflags[n]; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } event->destroy = hw_perf_event_destroy; return err; }
DoS Overflow
0
static int power_pmu_event_init(struct perf_event *event) { u64 ev; unsigned long flags; struct perf_event *ctrs[MAX_HWEVENTS]; u64 events[MAX_HWEVENTS]; unsigned int cflags[MAX_HWEVENTS]; int n; int err; struct cpu_hw_events *cpuhw; if (!ppmu) return -ENOENT; switch (event->attr.type) { case PERF_TYPE_HARDWARE: ev = event->attr.config; if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) return -EOPNOTSUPP; ev = ppmu->generic_events[ev]; break; case PERF_TYPE_HW_CACHE: err = hw_perf_cache_event(event->attr.config, &ev); if (err) return err; break; case PERF_TYPE_RAW: ev = event->attr.config; break; default: return -ENOENT; } event->hw.config_base = ev; event->hw.idx = 0; /* * If we are not running on a hypervisor, force the * exclude_hv bit to 0 so that we don't care what * the user set it to. */ if (!firmware_has_feature(FW_FEATURE_LPAR)) event->attr.exclude_hv = 0; /* * If this is a per-task event, then we can use * PM_RUN_* events interchangeably with their non RUN_* * equivalents, e.g. PM_RUN_CYC instead of PM_CYC. * XXX we should check if the task is an idle task. */ flags = 0; if (event->attach_state & PERF_ATTACH_TASK) flags |= PPMU_ONLY_COUNT_RUN; /* * If this machine has limited events, check whether this * event_id could go on a limited event. */ if (ppmu->flags & PPMU_LIMITED_PMC5_6) { if (can_go_on_limited_pmc(event, ev, flags)) { flags |= PPMU_LIMITED_PMC_OK; } else if (ppmu->limited_pmc_event(ev)) { /* * The requested event_id is on a limited PMC, * but we can't use a limited PMC; see if any * alternative goes on a normal PMC. */ ev = normal_pmc_alternative(ev, flags); if (!ev) return -EINVAL; } } /* * If this is in a group, check if it can go on with all the * other hardware events in the group. We assume the event * hasn't been linked into its leader's sibling list at this point. */ n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, ppmu->n_counter - 1, ctrs, events, cflags); if (n < 0) return -EINVAL; } events[n] = ev; ctrs[n] = event; cflags[n] = flags; if (check_excludes(ctrs, cflags, n, 1)) return -EINVAL; cpuhw = &get_cpu_var(cpu_hw_events); err = power_check_constraints(cpuhw, events, cflags, n + 1); put_cpu_var(cpu_hw_events); if (err) return -EINVAL; event->hw.config = events[n]; event->hw.event_base = cflags[n]; event->hw.last_period = event->hw.sample_period; local64_set(&event->hw.period_left, event->hw.last_period); /* * See if we need to reserve the PMU. * If no events are currently in use, then we have to take a * mutex to ensure that we don't race with another task doing * reserve_pmc_hardware or release_pmc_hardware. */ err = 0; if (!atomic_inc_not_zero(&num_events)) { mutex_lock(&pmc_reserve_mutex); if (atomic_read(&num_events) == 0 && reserve_pmc_hardware(perf_event_interrupt)) err = -EBUSY; else atomic_inc(&num_events); mutex_unlock(&pmc_reserve_mutex); } event->destroy = hw_perf_event_destroy; return err; }
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,148
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
DoS Overflow
0
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,149
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
DoS Overflow
0
static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,150
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
DoS Overflow
0
static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,151
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	write_pmc(event->hw.idx, left);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
DoS Overflow
0
static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);
	write_pmc(event->hw.idx, left);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,152
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
DoS Overflow
0
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,153
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
DoS Overflow
0
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,154
int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}
DoS Overflow
0
int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef MSR_HV
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,155
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
DoS Overflow
0
static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,156
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
DoS Overflow
0
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,157
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
DoS Overflow
0
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}
@@ -1269,6 +1269,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) return ip; } +static bool pmc_overflow(unsigned long val) +{ + if ((int)val < 0) + return true; + + /* + * Events on POWER7 can roll back if a speculative event doesn't + * eventually complete. Unfortunately in some rare cases they will + * raise a performance monitor exception. We need to catch this to + * ensure we reset the PMC. In all cases the PMC will be 256 or less + * cycles from overflow. + * + * We only do this if the first pass fails to find any overflowing + * PMCs because a user might set a period of less than 256 and we + * don't want to mistakenly reset them. + */ + if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) + return true; + + return false; +} + /* * Performance monitor interrupt stuff */ @@ -1316,7 +1338,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (is_limited_pmc(i + 1)) continue; val = read_pmc(i + 1); - if ((int)val < 0) + if (pmc_overflow(val)) write_pmc(i + 1, 0); } }
CWE-189
null
null
18,158
SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}
DoS
0
SYSCALL_DEFINE2(listen, int, fd, int, backlog)
{
	struct socket *sock;
	int err, fput_needed;
	int somaxconn;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
		if ((unsigned)backlog > somaxconn)
			backlog = somaxconn;

		err = security_socket_listen(sock, backlog);
		if (!err)
			err = sock->ops->listen(sock, backlog);

		fput_light(sock->file, fput_needed);
	}
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,159
SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}
DoS
0
SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock) {
		err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
		if (err >= 0) {
			err = security_socket_bind(sock,
						   (struct sockaddr *)&address,
						   addrlen);
			if (!err)
				err = sock->ops->bind(sock,
						      (struct sockaddr *)
						      &address, addrlen);
		}
		fput_light(sock->file, fput_needed);
	}
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,160
SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}
DoS
0
SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	err = security_socket_getsockname(sock);
	if (err)
		goto out_put;

	err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
	if (err)
		goto out_put;
	err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,161
SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1);
		if (!err)
			err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}
DoS
0
SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
		int __user *, usockaddr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int len, err, fput_needed;

	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (sock != NULL) {
		err = security_socket_getpeername(sock);
		if (err) {
			fput_light(sock->file, fput_needed);
			return err;
		}

		err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 1);
		if (!err)
			err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
						usockaddr_len);
		fput_light(sock->file, fput_needed);
	}
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,162
SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);

	if (!sock)
		goto out;

	err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);

	fput_light(sock->file, fput_needed);
out:
	return err;
}
DoS
0
SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
{
	int fput_needed, err;
	struct msghdr msg_sys;
	struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed);

	if (!sock)
		goto out;

	err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL);

	fput_light(sock->file, fput_needed);
out:
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,163
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags) { struct socket *sock, *newsock; struct file *newfile; int err, len, newfd, fput_needed; struct sockaddr_storage address; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = -ENFILE; newsock = sock_alloc(); if (!newsock) goto out_put; newsock->type = sock->type; newsock->ops = sock->ops; /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. */ __module_get(newsock->ops->owner); newfd = sock_alloc_file(newsock, &newfile, flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } err = security_socket_accept(sock, newsock); if (err) goto out_fd; err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_fd; if (upeer_sockaddr) { if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) { err = -ECONNABORTED; goto out_fd; } err = move_addr_to_user((struct sockaddr *)&address, len, upeer_sockaddr, upeer_addrlen); if (err < 0) goto out_fd; } /* File flags are not inherited via accept() unlike another OSes. */ fd_install(newfd, newfile); err = newfd; out_put: fput_light(sock->file, fput_needed); out: return err; out_fd: fput(newfile); put_unused_fd(newfd); goto out_put; }
DoS
0
SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr, int __user *, upeer_addrlen, int, flags) { struct socket *sock, *newsock; struct file *newfile; int err, len, newfd, fput_needed; struct sockaddr_storage address; if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK)) return -EINVAL; if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; err = -ENFILE; newsock = sock_alloc(); if (!newsock) goto out_put; newsock->type = sock->type; newsock->ops = sock->ops; /* * We don't need try_module_get here, as the listening socket (sock) * has the protocol module (sock->ops->owner) held. */ __module_get(newsock->ops->owner); newfd = sock_alloc_file(newsock, &newfile, flags); if (unlikely(newfd < 0)) { err = newfd; sock_release(newsock); goto out_put; } err = security_socket_accept(sock, newsock); if (err) goto out_fd; err = sock->ops->accept(sock, newsock, sock->file->f_flags); if (err < 0) goto out_fd; if (upeer_sockaddr) { if (newsock->ops->getname(newsock, (struct sockaddr *)&address, &len, 2) < 0) { err = -ECONNABORTED; goto out_fd; } err = move_addr_to_user((struct sockaddr *)&address, len, upeer_sockaddr, upeer_addrlen); if (err < 0) goto out_fd; } /* File flags are not inherited via accept() unlike another OSes. */ fd_install(newfd, newfile); err = newfd; out_put: fput_light(sock->file, fput_needed); out: return err; out_fd: fput(newfile); put_unused_fd(newfd); goto out_put; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,164
SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}
DoS
0
SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
		unsigned, flags)
{
	return sys_sendto(fd, buff, len, flags, NULL, 0);
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,165
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}
DoS
0
SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
		unsigned, flags, struct sockaddr __user *, addr,
		int, addr_len)
{
	struct socket *sock;
	struct sockaddr_storage address;
	int err;
	struct msghdr msg;
	struct iovec iov;
	int fput_needed;

	if (len > INT_MAX)
		len = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	iov.iov_base = buff;
	iov.iov_len = len;
	msg.msg_name = NULL;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	if (addr) {
		err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
		if (err < 0)
			goto out_put;
		msg.msg_name = (struct sockaddr *)&address;
		msg.msg_namelen = addr_len;
	}
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	msg.msg_flags = flags;
	err = sock_sendmsg(sock, &msg, len);

out_put:
	fput_light(sock->file, fput_needed);
out:
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,166
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	msg.msg_name = (struct sockaddr *)&address;
	msg.msg_namelen = sizeof(address);
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user((struct sockaddr *)&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}
DoS
0
SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
		unsigned, flags, struct sockaddr __user *, addr,
		int __user *, addr_len)
{
	struct socket *sock;
	struct iovec iov;
	struct msghdr msg;
	struct sockaddr_storage address;
	int err, err2;
	int fput_needed;

	if (size > INT_MAX)
		size = INT_MAX;
	sock = sockfd_lookup_light(fd, &err, &fput_needed);
	if (!sock)
		goto out;

	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_iovlen = 1;
	msg.msg_iov = &iov;
	iov.iov_len = size;
	iov.iov_base = ubuf;
	msg.msg_name = (struct sockaddr *)&address;
	msg.msg_namelen = sizeof(address);
	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;
	err = sock_recvmsg(sock, &msg, size, flags);

	if (err >= 0 && addr != NULL) {
		err2 = move_addr_to_user((struct sockaddr *)&address,
					 msg.msg_namelen, addr, addr_len);
		if (err2 < 0)
			err = err2;
	}

	fput_light(sock->file, fput_needed);
out:
	return err;
}
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,167
int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { if (net_ratelimit()) printk(KERN_WARNING "socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. */ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. */ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; }
DoS
0
int __sock_create(struct net *net, int family, int type, int protocol, struct socket **res, int kern) { int err; struct socket *sock; const struct net_proto_family *pf; /* * Check protocol is in range */ if (family < 0 || family >= NPROTO) return -EAFNOSUPPORT; if (type < 0 || type >= SOCK_MAX) return -EINVAL; /* Compatibility. This uglymoron is moved from INET layer to here to avoid deadlock in module load. */ if (family == PF_INET && type == SOCK_PACKET) { static int warned; if (!warned) { warned = 1; printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n", current->comm); } family = PF_PACKET; } err = security_socket_create(family, type, protocol, kern); if (err) return err; /* * Allocate the socket and allow the family to set things up. if * the protocol is 0, the family is instructed to select an appropriate * default. */ sock = sock_alloc(); if (!sock) { if (net_ratelimit()) printk(KERN_WARNING "socket: no more sockets\n"); return -ENFILE; /* Not exactly a match, but its the closest posix thing */ } sock->type = type; #ifdef CONFIG_MODULES /* Attempt to load a protocol module if the find failed. * * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user * requested real, full-featured networking support upon configuration. * Otherwise module support will break! */ if (rcu_access_pointer(net_families[family]) == NULL) request_module("net-pf-%d", family); #endif rcu_read_lock(); pf = rcu_dereference(net_families[family]); err = -EAFNOSUPPORT; if (!pf) goto out_release; /* * We will call the ->create function, that possibly is in a loadable * module, so we have to bump that loadable module refcnt first. */ if (!try_module_get(pf->owner)) goto out_release; /* Now protected by module ref count */ rcu_read_unlock(); err = pf->create(net, sock, protocol, kern); if (err < 0) goto out_module_put; /* * Now to bump the refcnt of the [loadable] module that owns this * socket at sock_release time we decrement its refcnt. */ if (!try_module_get(sock->ops->owner)) goto out_module_busy; /* * Now that we're done with the ->create function, the [loadable] * module can have its refcnt decremented */ module_put(pf->owner); err = security_socket_post_create(sock, family, type, protocol, kern); if (err) goto out_sock_release; *res = sock; return 0; out_module_busy: err = -EAFNOSUPPORT; out_module_put: sock->ops = NULL; module_put(pf->owner); out_sock_release: sock_release(sock); return err; out_release: rcu_read_unlock(); goto out_sock_release; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,168
static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, iov_size, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; /* Check whether to allocate the iovec area */ err = -ENOMEM; iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user((struct sockaddr *)&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) sock_kfree_s(sock->sk, iov, iov_size); out: return err; }
DoS
0
static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, struct msghdr *msg_sys, unsigned flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; unsigned long cmsg_ptr; int err, iov_size, total_len, len; /* kernel mode address */ struct sockaddr_storage addr; /* user mode address pointers */ struct sockaddr __user *uaddr; int __user *uaddr_len; if (MSG_CMSG_COMPAT & flags) { if (get_compat_msghdr(msg_sys, msg_compat)) return -EFAULT; } else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr))) return -EFAULT; err = -EMSGSIZE; if (msg_sys->msg_iovlen > UIO_MAXIOV) goto out; /* Check whether to allocate the iovec area */ err = -ENOMEM; iov_size = msg_sys->msg_iovlen * sizeof(struct iovec); if (msg_sys->msg_iovlen > UIO_FASTIOV) { iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL); if (!iov) goto out; } /* * Save the user-mode address (verify_iovec will change the * kernel msghdr to use the kernel address space) */ uaddr = (__force void __user *)msg_sys->msg_name; uaddr_len = COMPAT_NAMELEN(msg); if (MSG_CMSG_COMPAT & flags) { err = verify_compat_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); } else err = verify_iovec(msg_sys, iov, (struct sockaddr *)&addr, VERIFY_WRITE); if (err < 0) goto out_freeiov; total_len = err; cmsg_ptr = (unsigned long)msg_sys->msg_control; msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); if (sock->file->f_flags & O_NONBLOCK) flags |= MSG_DONTWAIT; err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys, total_len, flags); if (err < 0) goto out_freeiov; len = err; if (uaddr != NULL) { err = move_addr_to_user((struct sockaddr *)&addr, msg_sys->msg_namelen, uaddr, uaddr_len); if (err < 0) goto out_freeiov; } err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT), COMPAT_FLAGS(msg)); if (err) goto out_freeiov; if (MSG_CMSG_COMPAT & flags) err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg_compat->msg_controllen); else err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr, &msg->msg_controllen); if (err) goto out_freeiov; err = len; out_freeiov: if (iov != iovstack) sock_kfree_s(sock->sk, iov, iov_size); out: return err; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
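The patch attached to the records above hardens __sys_sendmsg: before comparing or caching the destination address it now checks that msg_sys->msg_name is non-NULL, so a sendmmsg() call on a connected socket (which legitimately passes no name) can no longer drive memcmp()/memcpy() through a NULL pointer. What follows is a minimal stand-alone sketch of that guarded "cache the last destination" pattern, not the kernel code itself; the struct cached_addr type and the send_one/send_nosec/send_checked helpers are invented purely for illustration.

/*
 * Sketch of the guarded address-cache pattern from the patch above.
 * All identifiers here are made up for illustration.
 */
#include <stdio.h>
#include <string.h>

struct cached_addr {
	unsigned int name_len;      /* UINT_MAX until the first send */
	char         name[128];
};

/* Pretend transmit helpers; the "nosec" one skips the per-message check. */
static int send_nosec(const char *buf)   { return printf("fast:    %s\n", buf); }
static int send_checked(const char *buf) { return printf("checked: %s\n", buf); }

static int send_one(struct cached_addr *used, const char *name,
		    unsigned int name_len, const char *buf)
{
	/* The fix: only compare/copy when a destination name is present. */
	if (used && name &&
	    used->name_len == name_len &&
	    !memcmp(used->name, name, name_len))
		return send_nosec(buf);

	if (send_checked(buf) < 0)
		return -1;

	if (used) {
		used->name_len = name_len;
		if (name)                       /* second half of the fix */
			memcpy(used->name, name, name_len);
	}
	return 0;
}

int main(void)
{
	struct cached_addr used = { .name_len = (unsigned int)-1 };

	send_one(&used, "10.0.0.1:53", 11, "first datagram");    /* full path */
	send_one(&used, "10.0.0.1:53", 11, "second datagram");   /* cached hit */
	send_one(&used, NULL, 0, "connected-socket datagram");   /* no NULL deref */
	return 0;
}

The third call is the case the original code mishandled: with no name supplied, the unguarded memcmp()/memcpy() would have dereferenced NULL, matching the DoS classification of these records.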
18,169
static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -EINVAL; } }
DoS
0
static int bond_ioctl(struct net *net, unsigned int cmd, struct compat_ifreq __user *ifr32) { struct ifreq kifr; struct ifreq __user *uifr; mm_segment_t old_fs; int err; u32 data; void __user *datap; switch (cmd) { case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDCHANGEACTIVE: if (copy_from_user(&kifr, ifr32, sizeof(struct compat_ifreq))) return -EFAULT; old_fs = get_fs(); set_fs(KERNEL_DS); err = dev_ioctl(net, cmd, (struct ifreq __user __force *) &kifr); set_fs(old_fs); return err; case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: uifr = compat_alloc_user_space(sizeof(*uifr)); if (copy_in_user(&uifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; datap = compat_ptr(data); if (put_user(datap, &uifr->ifr_ifru.ifru_data)) return -EFAULT; return dev_ioctl(net, cmd, uifr); default: return -EINVAL; } }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,170
static long compat_sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; }
DoS
0
static long compat_sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) { struct socket *sock = file->private_data; int ret = -ENOIOCTLCMD; struct sock *sk; struct net *net; sk = sock->sk; net = sock_net(sk); if (sock->ops->compat_ioctl) ret = sock->ops->compat_ioctl(sock, cmd, arg); if (ret == -ENOIOCTLCMD && (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)) ret = compat_wext_handle_ioctl(net, cmd, arg); if (ret == -ENOIOCTLCMD) ret = compat_sock_ioctl_trans(file, sock, cmd, arg); return ret; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,171
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } /* Prevent warning from compat_sys_ioctl, these always * result in -EINVAL in the native case anyway. */ switch (cmd) { case SIOCRTMSG: case SIOCGIFCOUNT: case SIOCSRARP: case SIOCGRARP: case SIOCDRARP: case SIOCSIFLINK: case SIOCGIFSLAVE: case SIOCSIFSLAVE: return -EINVAL; } return -ENOIOCTLCMD; }
DoS
0
static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, unsigned int cmd, unsigned long arg) { void __user *argp = compat_ptr(arg); struct sock *sk = sock->sk; struct net *net = sock_net(sk); if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) return siocdevprivate_ioctl(net, cmd, argp); switch (cmd) { case SIOCSIFBR: case SIOCGIFBR: return old_bridge_ioctl(argp); case SIOCGIFNAME: return dev_ifname32(net, argp); case SIOCGIFCONF: return dev_ifconf(net, argp); case SIOCETHTOOL: return ethtool_ioctl(net, argp); case SIOCWANDEV: return compat_siocwandev(net, argp); case SIOCGIFMAP: case SIOCSIFMAP: return compat_sioc_ifmap(net, cmd, argp); case SIOCBONDENSLAVE: case SIOCBONDRELEASE: case SIOCBONDSETHWADDR: case SIOCBONDSLAVEINFOQUERY: case SIOCBONDINFOQUERY: case SIOCBONDCHANGEACTIVE: return bond_ioctl(net, cmd, argp); case SIOCADDRT: case SIOCDELRT: return routing_ioctl(net, sock, cmd, argp); case SIOCGSTAMP: return do_siocgstamp(net, sock, cmd, argp); case SIOCGSTAMPNS: return do_siocgstampns(net, sock, cmd, argp); case SIOCSHWTSTAMP: return compat_siocshwtstamp(net, argp); case FIOSETOWN: case SIOCSPGRP: case FIOGETOWN: case SIOCGPGRP: case SIOCBRADDBR: case SIOCBRDELBR: case SIOCGIFVLAN: case SIOCSIFVLAN: case SIOCADDDLCI: case SIOCDELDLCI: return sock_ioctl(file, cmd, arg); case SIOCGIFFLAGS: case SIOCSIFFLAGS: case SIOCGIFMETRIC: case SIOCSIFMETRIC: case SIOCGIFMTU: case SIOCSIFMTU: case SIOCGIFMEM: case SIOCSIFMEM: case SIOCGIFHWADDR: case SIOCSIFHWADDR: case SIOCADDMULTI: case SIOCDELMULTI: case SIOCGIFINDEX: case SIOCGIFADDR: case SIOCSIFADDR: case SIOCSIFHWBROADCAST: case SIOCDIFADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCSIFPFLAGS: case SIOCGIFPFLAGS: case SIOCGIFTXQLEN: case SIOCSIFTXQLEN: case SIOCBRADDIF: case SIOCBRDELIF: case SIOCSIFNAME: case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return dev_ifsioc(net, sock, cmd, argp); case SIOCSARP: case SIOCGARP: case SIOCDARP: case SIOCATMARK: return sock_do_ioctl(net, sock, cmd, arg); } /* Prevent warning from compat_sys_ioctl, these always * result in -EINVAL in the native case anyway. */ switch (cmd) { case SIOCRTMSG: case SIOCGIFCOUNT: case SIOCSRARP: case SIOCGRARP: case SIOCDRARP: case SIOCSIFLINK: case SIOCGIFSLAVE: case SIOCSIFSLAVE: return -EINVAL; } return -ENOIOCTLCMD; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,172
static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timeval __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) { err = put_user(ktv.tv_sec, &up->tv_sec); err |= __put_user(ktv.tv_usec, &up->tv_usec); } return err; }
DoS
0
static int do_siocgstamp(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timeval __user *up) { mm_segment_t old_fs = get_fs(); struct timeval ktv; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) { err = put_user(ktv.tv_sec, &up->tv_sec); err |= __put_user(ktv.tv_usec, &up->tv_usec); } return err; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,173
static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timespec __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) { err = put_user(kts.tv_sec, &up->tv_sec); err |= __put_user(kts.tv_nsec, &up->tv_nsec); } return err; }
DoS
0
static int do_siocgstampns(struct net *net, struct socket *sock, unsigned int cmd, struct compat_timespec __user *up) { mm_segment_t old_fs = get_fs(); struct timespec kts; int err; set_fs(KERNEL_DS); err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) { err = put_user(kts.tv_sec, &up->tv_sec); err |= __put_user(kts.tv_nsec, &up->tv_nsec); } return err; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,174
static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. */ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. */ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; }
DoS
0
static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) { struct compat_ethtool_rxnfc __user *compat_rxnfc; bool convert_in = false, convert_out = false; size_t buf_size = ALIGN(sizeof(struct ifreq), 8); struct ethtool_rxnfc __user *rxnfc; struct ifreq __user *ifr; u32 rule_cnt = 0, actual_rule_cnt; u32 ethcmd; u32 data; int ret; if (get_user(data, &ifr32->ifr_ifru.ifru_data)) return -EFAULT; compat_rxnfc = compat_ptr(data); if (get_user(ethcmd, &compat_rxnfc->cmd)) return -EFAULT; /* Most ethtool structures are defined without padding. * Unfortunately struct ethtool_rxnfc is an exception. */ switch (ethcmd) { default: break; case ETHTOOL_GRXCLSRLALL: /* Buffer size is variable */ if (get_user(rule_cnt, &compat_rxnfc->rule_cnt)) return -EFAULT; if (rule_cnt > KMALLOC_MAX_SIZE / sizeof(u32)) return -ENOMEM; buf_size += rule_cnt * sizeof(u32); /* fall through */ case ETHTOOL_GRXRINGS: case ETHTOOL_GRXCLSRLCNT: case ETHTOOL_GRXCLSRULE: convert_out = true; /* fall through */ case ETHTOOL_SRXCLSRLDEL: case ETHTOOL_SRXCLSRLINS: buf_size += sizeof(struct ethtool_rxnfc); convert_in = true; break; } ifr = compat_alloc_user_space(buf_size); rxnfc = (void *)ifr + ALIGN(sizeof(struct ifreq), 8); if (copy_in_user(&ifr->ifr_name, &ifr32->ifr_name, IFNAMSIZ)) return -EFAULT; if (put_user(convert_in ? rxnfc : compat_ptr(data), &ifr->ifr_ifru.ifru_data)) return -EFAULT; if (convert_in) { /* We expect there to be holes between fs.m_ext and * fs.ring_cookie and at the end of fs, but nowhere else. */ BUILD_BUG_ON(offsetof(struct compat_ethtool_rxnfc, fs.m_ext) + sizeof(compat_rxnfc->fs.m_ext) != offsetof(struct ethtool_rxnfc, fs.m_ext) + sizeof(rxnfc->fs.m_ext)); BUILD_BUG_ON( offsetof(struct compat_ethtool_rxnfc, fs.location) - offsetof(struct compat_ethtool_rxnfc, fs.ring_cookie) != offsetof(struct ethtool_rxnfc, fs.location) - offsetof(struct ethtool_rxnfc, fs.ring_cookie)); if (copy_in_user(rxnfc, compat_rxnfc, (void *)(&rxnfc->fs.m_ext + 1) - (void *)rxnfc) || copy_in_user(&rxnfc->fs.ring_cookie, &compat_rxnfc->fs.ring_cookie, (void *)(&rxnfc->fs.location + 1) - (void *)&rxnfc->fs.ring_cookie) || copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; } ret = dev_ioctl(net, SIOCETHTOOL, ifr); if (ret) return ret; if (convert_out) { if (copy_in_user(compat_rxnfc, rxnfc, (const void *)(&rxnfc->fs.m_ext + 1) - (const void *)rxnfc) || copy_in_user(&compat_rxnfc->fs.ring_cookie, &rxnfc->fs.ring_cookie, (const void *)(&rxnfc->fs.location + 1) - (const void *)&rxnfc->fs.ring_cookie) || copy_in_user(&compat_rxnfc->rule_cnt, &rxnfc->rule_cnt, sizeof(rxnfc->rule_cnt))) return -EFAULT; if (ethcmd == ETHTOOL_GRXCLSRLALL) { /* As an optimisation, we only copy the actual * number of rules that the underlying * function returned. Since Mallory might * change the rule count in user memory, we * check that it is less than the rule count * originally given (as the user buffer size), * which has been range-checked. */ if (get_user(actual_rule_cnt, &rxnfc->rule_cnt)) return -EFAULT; if (actual_rule_cnt < rule_cnt) rule_cnt = actual_rule_cnt; if (copy_in_user(&compat_rxnfc->rule_locs[0], &rxnfc->rule_locs[0], rule_cnt * sizeof(u32))) return -EFAULT; } } return 0; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,175
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); }
DoS
0
int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr) { if (ulen < 0 || ulen > sizeof(struct sockaddr_storage)) return -EINVAL; if (ulen == 0) return 0; if (copy_from_user(kaddr, uaddr, ulen)) return -EFAULT; return audit_sockaddr(ulen, kaddr); }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,176
static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); percpu_add(sockets_in_use, 1); return sock; }
DoS
0
static struct socket *sock_alloc(void) { struct inode *inode; struct socket *sock; inode = new_inode_pseudo(sock_mnt->mnt_sb); if (!inode) return NULL; sock = SOCKET_I(inode); kmemcheck_annotate_bitfield(sock, type); inode->i_ino = get_next_ino(); inode->i_mode = S_IFSOCK | S_IRWXUGO; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); percpu_add(sockets_in_use, 1); return sock; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,177
static struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; }
DoS
0
static struct socket *sock_from_file(struct file *file, int *err) { if (file->f_op == &socket_file_ops) return file->private_data; /* set in sock_map_fd */ *err = -ENOTSOCK; return NULL; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,178
static int __init sock_init(void) { int err; /* * Initialize sock SLAB cache. */ sk_init(); /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. */ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER netfilter_init(); #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; }
DoS
0
static int __init sock_init(void) { int err; /* * Initialize sock SLAB cache. */ sk_init(); /* * Initialize skbuff SLAB cache */ skb_init(); /* * Initialize the protocols module. */ init_inodecache(); err = register_filesystem(&sock_fs_type); if (err) goto out_fs; sock_mnt = kern_mount(&sock_fs_type); if (IS_ERR(sock_mnt)) { err = PTR_ERR(sock_mnt); goto out_mount; } /* The real protocol initialization is performed in later initcalls. */ #ifdef CONFIG_NETFILTER netfilter_init(); #endif #ifdef CONFIG_NETWORK_PHY_TIMESTAMPING skb_timestamping_init(); #endif out: return err; out_mount: unregister_filesystem(&sock_fs_type); out_fs: goto out; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,179
void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); percpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; }
DoS
0
void sock_release(struct socket *sock) { if (sock->ops) { struct module *owner = sock->ops->owner; sock->ops->release(sock); sock->ops = NULL; module_put(owner); } if (rcu_dereference_protected(sock->wq, 1)->fasync_list) printk(KERN_ERR "sock_release: fasync list not empty!\n"); percpu_sub(sockets_in_use, 1); if (!sock->file) { iput(SOCK_INODE(sock)); return; } sock->file = NULL; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,180
static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; if (more) flags |= MSG_MORE; return kernel_sendpage(sock, page, offset, size, flags); }
DoS
0
static ssize_t sock_sendpage(struct file *file, struct page *page, int offset, size_t size, loff_t *ppos, int more) { struct socket *sock; int flags; sock = file->private_data; flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT; if (more) flags |= MSG_MORE; return kernel_sendpage(sock, page, offset, size, flags); }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,181
int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; return 0; }
DoS
0
int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags) { *tx_flags = 0; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE)) *tx_flags |= SKBTX_HW_TSTAMP; if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE)) *tx_flags |= SKBTX_SW_TSTAMP; return 0; }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,182
void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); rcu_assign_pointer(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); }
DoS
0
void sock_unregister(int family) { BUG_ON(family < 0 || family >= NPROTO); spin_lock(&net_family_lock); rcu_assign_pointer(net_families[family], NULL); spin_unlock(&net_family_lock); synchronize_rcu(); printk(KERN_INFO "NET: Unregistered protocol family %d\n", family); }
@@ -1965,8 +1965,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, * used_address->name_len is initialized to UINT_MAX so that the first * destination address never matches. */ - if (used_address && used_address->name_len == msg_sys->msg_namelen && - !memcmp(&used_address->name, msg->msg_name, + if (used_address && msg_sys->msg_name && + used_address->name_len == msg_sys->msg_namelen && + !memcmp(&used_address->name, msg_sys->msg_name, used_address->name_len)) { err = sock_sendmsg_nosec(sock, msg_sys, total_len); goto out_freectl; @@ -1978,8 +1979,9 @@ static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, */ if (used_address && err >= 0) { used_address->name_len = msg_sys->msg_namelen; - memcpy(&used_address->name, msg->msg_name, - used_address->name_len); + if (msg_sys->msg_name) + memcpy(&used_address->name, msg_sys->msg_name, + used_address->name_len); } out_freectl:
null
null
null
18,183
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info, struct udp_table *udptable) { struct ipv6_pinfo *np; struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; struct in6_addr *saddr = &hdr->saddr; struct in6_addr *daddr = &hdr->daddr; struct udphdr *uh = (struct udphdr*)(skb->data+offset); struct sock *sk; int err; sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest, saddr, uh->source, inet6_iif(skb), udptable); if (sk == NULL) return; np = inet6_sk(sk); if (!icmpv6_err_convert(type, code, &err) && !np->recverr) goto out; if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) goto out; if (np->recverr) ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); }
DoS
0
void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt, u8 type, u8 code, int offset, __be32 info, struct udp_table *udptable) { struct ipv6_pinfo *np; struct ipv6hdr *hdr = (struct ipv6hdr*)skb->data; struct in6_addr *saddr = &hdr->saddr; struct in6_addr *daddr = &hdr->daddr; struct udphdr *uh = (struct udphdr*)(skb->data+offset); struct sock *sk; int err; sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest, saddr, uh->source, inet6_iif(skb), udptable); if (sk == NULL) return; np = inet6_sk(sk); if (!icmpv6_err_convert(type, code, &err) && !np->recverr) goto out; if (sk->sk_state != TCP_ESTABLISHED && !np->recverr) goto out; if (np->recverr) ipv6_icmp_error(sk, skb, err, uh->dest, ntohl(info), (u8 *)(uh+1)); sk->sk_err = err; sk->sk_error_report(sk); out: sock_put(sk); }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
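The patch carried by the UDPv6 records above changes the headroom test in udp6_ufo_fragment: the fragment header is pushed in front of the MAC header, so the relevant question is how much room exists between skb->head and skb_mac_header(), not between skb->head and skb->data (which is what skb_headroom() measures). Below is a stand-alone sketch contrasting the two tests; struct fake_skb is a deliberately simplified stand-in for sk_buff with only the three pointers needed for the comparison, so none of these names are the kernel's own.

/*
 * Sketch of the headroom test changed by the patch above.
 * fake_skb models only head/data/mac; everything else is omitted.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	unsigned char *head;  /* start of the allocated buffer */
	unsigned char *data;  /* current start of packet data  */
	unsigned char *mac;   /* where the MAC header begins   */
};

/* Old test: room between head and data (skb_headroom). */
static bool needs_expand_old(const struct fake_skb *skb, int frag_hdr_sz)
{
	return (skb->data - skb->head) < frag_hdr_sz;
}

/* New test: room in front of the MAC header, which is where the
 * fragment header is actually inserted. */
static bool needs_expand_new(const struct fake_skb *skb, int frag_hdr_sz)
{
	return skb->mac < skb->head + frag_hdr_sz;
}

int main(void)
{
	unsigned char buf[256];
	/* 16 bytes of classic headroom, but the MAC header sits only
	 * 4 bytes above head: the old check says "enough room" while
	 * the new check correctly asks for the buffer to be expanded. */
	struct fake_skb skb = { .head = buf, .data = buf + 16, .mac = buf + 4 };
	int frag_hdr_sz = 8;

	printf("old test expands: %d\n", needs_expand_old(&skb, frag_hdr_sz));
	printf("new test expands: %d\n", needs_expand_new(&skb, frag_hdr_sz));
	return 0;
}

Under the simplified layout above, the old test reports sufficient headroom while the new test requests pskb_expand_head()-style growth, which is the resource-management slip (CWE-399) these records are tagged with.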
18,184
static struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = -1; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, hnum, saddr, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; }
DoS
0
static struct sock *__udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, __be16 dport, int dif, struct udp_table *udptable) { struct sock *sk, *result; struct hlist_nulls_node *node; unsigned short hnum = ntohs(dport); unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask); struct udp_hslot *hslot2, *hslot = &udptable->hash[slot]; int score, badness; rcu_read_lock(); if (hslot->count > 10) { hash2 = udp6_portaddr_hash(net, daddr, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, daddr, hnum, dif, hslot2, slot2); if (!result) { hash2 = udp6_portaddr_hash(net, &in6addr_any, hnum); slot2 = hash2 & udptable->mask; hslot2 = &udptable->hash2[slot2]; if (hslot->count < hslot2->count) goto begin; result = udp6_lib_lookup2(net, saddr, sport, &in6addr_any, hnum, dif, hslot2, slot2); } rcu_read_unlock(); return result; } begin: result = NULL; badness = -1; sk_nulls_for_each_rcu(sk, node, &hslot->head) { score = compute_score(sk, net, hnum, saddr, sport, daddr, dport, dif); if (score > badness) { result = sk; badness = score; } } /* * if the nulls value we got at the end of this lookup is * not the expected one, we must restart lookup. * We probably met an item that was moved to another chain. */ if (get_nulls_value(node) != slot) goto begin; if (result) { if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2))) result = NULL; else if (unlikely(compute_score(result, net, hnum, saddr, sport, daddr, dport, dif) < badness)) { sock_put(result); goto begin; } } rcu_read_unlock(); return result; }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,185
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; struct ipv6hdr *iph = ipv6_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable); }
DoS
0
static struct sock *__udp6_lib_lookup_skb(struct sk_buff *skb, __be16 sport, __be16 dport, struct udp_table *udptable) { struct sock *sk; struct ipv6hdr *iph = ipv6_hdr(skb); if (unlikely(sk = skb_steal_sock(skb))) return sk; return __udp6_lib_lookup(dev_net(skb_dst(skb)->dev), &iph->saddr, sport, &iph->daddr, dport, inet6_iif(skb), udptable); }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,186
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = inet6_iif(skb); sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing the lock, we must take reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; }
DoS
0
static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, struct in6_addr *saddr, struct in6_addr *daddr, struct udp_table *udptable) { struct sock *sk, *stack[256 / sizeof(struct sock *)]; const struct udphdr *uh = udp_hdr(skb); struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest)); int dif; unsigned int i, count = 0; spin_lock(&hslot->lock); sk = sk_nulls_head(&hslot->head); dif = inet6_iif(skb); sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif); while (sk) { stack[count++] = sk; sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr, uh->source, saddr, dif); if (unlikely(count == ARRAY_SIZE(stack))) { if (!sk) break; flush_stack(stack, count, skb, ~0); count = 0; } } /* * before releasing the lock, we must take reference on sockets */ for (i = 0; i < count; i++) sock_hold(stack[i]); spin_unlock(&hslot->lock); if (count) { flush_stack(stack, count, skb, count - 1); for (i = 0; i < count; i++) sock_put(stack[i]); } else { kfree_skb(skb); } return 0; }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,187
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct net *net = dev_net(skb->dev); struct sock *sk; struct udphdr *uh; struct in6_addr *saddr, *daddr; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto discard; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. */ /* Check for jumbo payload */ if (ulen == 0) ulen = skb->len; if (ulen < sizeof(*uh)) goto short_packet; if (ulen < skb->len) { if (pskb_trim_rcsum(skb, ulen)) goto short_packet; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); } } if (udp6_csum_init(skb, uh, proto)) goto discard; /* * Multicast receive code */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, saddr, daddr, udptable); /* Unicast */ /* * check socket cache ... must talk to Alan about his plans * for sock caches... i'll skip this for now. */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk == NULL) { if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; if (udp_lib_checksum_complete(skb)) goto discard; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; } /* deliver */ if (sk_rcvqueues_full(sk, skb)) { sock_put(sk); goto discard; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb)) { atomic_inc(&sk->sk_drops); bh_unlock_sock(sk); sock_put(sk); goto discard; } bh_unlock_sock(sk); sock_put(sk); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", proto == IPPROTO_UDPLITE ? "-Lite" : "", saddr, ntohs(uh->source), ulen, skb->len, daddr, ntohs(uh->dest)); discard: UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; }
DoS
0
int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { struct net *net = dev_net(skb->dev); struct sock *sk; struct udphdr *uh; struct in6_addr *saddr, *daddr; u32 ulen = 0; if (!pskb_may_pull(skb, sizeof(struct udphdr))) goto discard; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); ulen = ntohs(uh->len); if (ulen > skb->len) goto short_packet; if (proto == IPPROTO_UDP) { /* UDP validates ulen. */ /* Check for jumbo payload */ if (ulen == 0) ulen = skb->len; if (ulen < sizeof(*uh)) goto short_packet; if (ulen < skb->len) { if (pskb_trim_rcsum(skb, ulen)) goto short_packet; saddr = &ipv6_hdr(skb)->saddr; daddr = &ipv6_hdr(skb)->daddr; uh = udp_hdr(skb); } } if (udp6_csum_init(skb, uh, proto)) goto discard; /* * Multicast receive code */ if (ipv6_addr_is_multicast(daddr)) return __udp6_lib_mcast_deliver(net, skb, saddr, daddr, udptable); /* Unicast */ /* * check socket cache ... must talk to Alan about his plans * for sock caches... i'll skip this for now. */ sk = __udp6_lib_lookup_skb(skb, uh->source, uh->dest, udptable); if (sk == NULL) { if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; if (udp_lib_checksum_complete(skb)) goto discard; UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); kfree_skb(skb); return 0; } /* deliver */ if (sk_rcvqueues_full(sk, skb)) { sock_put(sk); goto discard; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb); else if (sk_add_backlog(sk, skb)) { atomic_inc(&sk->sk_drops); bh_unlock_sock(sk); sock_put(sk); goto discard; } bh_unlock_sock(sk); sock_put(sk); return 0; short_packet: LIMIT_NETDEBUG(KERN_DEBUG "UDP%sv6: short packet: From [%pI6c]:%u %d/%d to [%pI6c]:%u\n", proto == IPPROTO_UDPLITE ? "-Lite" : "", saddr, ntohs(uh->source), ulen, skb->len, daddr, ntohs(uh->dest)); discard: UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); kfree_skb(skb); return 0; }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,188
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); }
DoS
0
int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, char __user *optval, int __user *optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_getsockopt(sk, level, optname, optval, optlen); return compat_ipv6_getsockopt(sk, level, optname, optval, optlen); }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,189
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); }
DoS
0
int compat_udpv6_setsockopt(struct sock *sk, int level, int optname, char __user *optval, unsigned int optlen) { if (level == SOL_UDP || level == SOL_UDPLITE) return udp_lib_setsockopt(sk, level, optname, optval, optlen, udp_v6_push_pending_frames); return compat_ipv6_setsockopt(sk, level, optname, optval, optlen); }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,190
static inline int compute_score2(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score = 0; if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&np->daddr)) { if (!ipv6_addr_equal(&np->daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } } return score; }
DoS
0
static inline int compute_score2(struct sock *sk, struct net *net, const struct in6_addr *saddr, __be16 sport, const struct in6_addr *daddr, unsigned short hnum, int dif) { int score = -1; if (net_eq(sock_net(sk), net) && udp_sk(sk)->udp_port_hash == hnum && sk->sk_family == PF_INET6) { struct ipv6_pinfo *np = inet6_sk(sk); struct inet_sock *inet = inet_sk(sk); if (!ipv6_addr_equal(&np->rcv_saddr, daddr)) return -1; score = 0; if (inet->inet_dport) { if (inet->inet_dport != sport) return -1; score++; } if (!ipv6_addr_any(&np->daddr)) { if (!ipv6_addr_equal(&np->daddr, saddr)) return -1; score++; } if (sk->sk_bound_dev_if) { if (sk->sk_bound_dev_if != dif) return -1; score++; } } return score; }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,191
static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { unsigned int i; struct sock *sk; struct sk_buff *skb1; for (i = 0; i < count; i++) { skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); sk = stack[i]; if (skb1) { if (sk_rcvqueues_full(sk, skb1)) { kfree_skb(skb1); goto drop; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb1); else if (sk_add_backlog(sk, skb1)) { kfree_skb(skb1); bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); continue; } drop: atomic_inc(&sk->sk_drops); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } }
DoS
0
static void flush_stack(struct sock **stack, unsigned int count, struct sk_buff *skb, unsigned int final) { unsigned int i; struct sock *sk; struct sk_buff *skb1; for (i = 0; i < count; i++) { skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC); sk = stack[i]; if (skb1) { if (sk_rcvqueues_full(sk, skb1)) { kfree_skb(skb1); goto drop; } bh_lock_sock(sk); if (!sock_owned_by_user(sk)) udpv6_queue_rcv_skb(sk, skb1); else if (sk_add_backlog(sk, skb1)) { kfree_skb(skb1); bh_unlock_sock(sk); goto drop; } bh_unlock_sock(sk); continue; } drop: atomic_inc(&sk->sk_drops); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk)); UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, IS_UDPLITE(sk)); } }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,192
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); __be32 sk1_rcv_saddr = sk_rcv_saddr(sk); __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); int sk_ipv6only = ipv6_only_sock(sk); int sk2_ipv6only = inet_v6_ipv6only(sk2); int addr_type = ipv6_addr_type(sk_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) return (!sk2_ipv6only && (!sk1_rcv_saddr || !sk2_rcv_saddr || sk1_rcv_saddr == sk2_rcv_saddr)); if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) return 1; if (addr_type == IPV6_ADDR_ANY && !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) return 1; if (sk2_rcv_saddr6 && ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6)) return 1; return 0; }
DoS
0
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) { const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); __be32 sk1_rcv_saddr = sk_rcv_saddr(sk); __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2); int sk_ipv6only = ipv6_only_sock(sk); int sk2_ipv6only = inet_v6_ipv6only(sk2); int addr_type = ipv6_addr_type(sk_rcv_saddr6); int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; /* if both are mapped, treat as IPv4 */ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) return (!sk2_ipv6only && (!sk1_rcv_saddr || !sk2_rcv_saddr || sk1_rcv_saddr == sk2_rcv_saddr)); if (addr_type2 == IPV6_ADDR_ANY && !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) return 1; if (addr_type == IPV6_ADDR_ANY && !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) return 1; if (sk2_rcv_saddr6 && ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6)) return 1; return 0; }
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features) skb->ip_summed = CHECKSUM_NONE; /* Check if there is enough headroom to insert fragment header. */ - if ((skb_headroom(skb) < frag_hdr_sz) && + if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) && pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC)) goto out;
CWE-399
null
null
18,193
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
DoS
0
static void udp6_hwcsum_outgoing(struct sock *sk, struct sk_buff *skb,
				 const struct in6_addr *saddr,
				 const struct in6_addr *daddr, int len)
{
	unsigned int offset;
	struct udphdr *uh = udp_hdr(skb);
	__wsum csum = 0;

	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/* Only one fragment on the socket. */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP, 0);
	} else {
		/*
		 * HW-checksum won't work as there are two or more
		 * fragments on the socket so that all csums of sk_buffs
		 * should be together
		 */
		offset = skb_transport_offset(skb);
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);

		skb->ip_summed = CHECKSUM_NONE;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			csum = csum_add(csum, skb->csum);
		}

		uh->check = csum_ipv6_magic(saddr, daddr, len, IPPROTO_UDP,
					    csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,194
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
DoS
0
struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport,
			     const struct in6_addr *daddr, __be16 dport, int dif)
{
	return __udp6_lib_lookup(net, saddr, sport, daddr, dport, dif, &udp_table);
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,195
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness;

begin:
	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				       daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			if (score == SCORE2_MAX)
				goto exact_match;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;

	if (result) {
exact_match:
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}
DoS
0
static struct sock *udp6_lib_lookup2(struct net *net,
		const struct in6_addr *saddr, __be16 sport,
		const struct in6_addr *daddr, unsigned int hnum, int dif,
		struct udp_hslot *hslot2, unsigned int slot2)
{
	struct sock *sk, *result;
	struct hlist_nulls_node *node;
	int score, badness;

begin:
	result = NULL;
	badness = -1;
	udp_portaddr_for_each_entry_rcu(sk, node, &hslot2->head) {
		score = compute_score2(sk, net, saddr, sport,
				       daddr, hnum, dif);
		if (score > badness) {
			result = sk;
			badness = score;
			if (score == SCORE2_MAX)
				goto exact_match;
		}
	}
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (get_nulls_value(node) != slot2)
		goto begin;

	if (result) {
exact_match:
		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
			result = NULL;
		else if (unlikely(compute_score2(result, net, saddr, sport,
				  daddr, hnum, dif) < badness)) {
			sock_put(result);
			goto begin;
		}
	}
	return result;
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,196
static unsigned int udp6_portaddr_hash(struct net *net,
				       const struct in6_addr *addr6,
				       unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}
DoS
0
static unsigned int udp6_portaddr_hash(struct net *net,
				       const struct in6_addr *addr6,
				       unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,197
void udp6_proc_exit(struct net *net)
{
	udp_proc_unregister(net, &udp6_seq_afinfo);
}
DoS
0
void udp6_proc_exit(struct net *net)
{
	udp_proc_unregister(net, &udp6_seq_afinfo);
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,198
int __net_init udp6_proc_init(struct net *net)
{
	return udp_proc_register(net, &udp6_seq_afinfo);
}
DoS
0
int __net_init udp6_proc_init(struct net *net)
{
	return udp_proc_register(net, &udp6_seq_afinfo);
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null
18,199
static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);
	seq_printf(seq,
		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
		   bucket,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}
DoS
0
static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket)
{
	struct inet_sock *inet = inet_sk(sp);
	struct ipv6_pinfo *np = inet6_sk(sp);
	struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest  = &np->daddr;
	src   = &np->rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);
	seq_printf(seq,
		   "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d\n",
		   bucket,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   sp->sk_state,
		   sk_wmem_alloc_get(sp),
		   sk_rmem_alloc_get(sp),
		   0, 0L, 0,
		   sock_i_uid(sp), 0,
		   sock_i_ino(sp),
		   atomic_read(&sp->sk_refcnt), sp,
		   atomic_read(&sp->sk_drops));
}
@@ -1335,7 +1335,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb, u32 features)
 	skb->ip_summed = CHECKSUM_NONE;
 
 	/* Check if there is enough headroom to insert fragment header. */
-	if ((skb_headroom(skb) < frag_hdr_sz) &&
+	if ((skb_mac_header(skb) < skb->head + frag_hdr_sz) &&
 	    pskb_expand_head(skb, frag_hdr_sz, 0, GFP_ATOMIC))
 		goto out;
CWE-399
null
null